From ef3e938fed0f2a5e81e3c2d8fc549e0e0eed369b Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Fri, 6 May 2022 11:20:49 -0700 Subject: [PATCH] Release v1.44.9 (2022-05-06) (#4392) Release v1.44.9 (2022-05-06) === ### Service Client Updates * `service/ec2`: Updates service API * Add new state values for IPAMs, IPAM Scopes, and IPAM Pools. * `service/location`: Updates service API, documentation, and paginators * `service/mediapackage`: Updates service API and documentation * This release adds Dvb Dash 2014 as an available profile option for Dash Origin Endpoints. * `service/rds`: Updates service API, documentation, waiters, paginators, and examples * Various documentation improvements. * `service/redshift`: Updates service API and documentation * Introduces new field 'LoadSampleData' in CreateCluster operation. Customers can now specify 'LoadSampleData' option during creation of a cluster, which results in loading of sample data in the cluster that is created. 
* `service/securityhub`: Updates service documentation --- CHANGELOG.md | 15 ++++ aws/endpoints/defaults.go | 18 ++-- aws/version.go | 2 +- models/apis/ec2/2016-11-15/api-2.json | 15 +++- models/apis/location/2020-11-19/api-2.json | 51 +++++------ models/apis/location/2020-11-19/docs-2.json | 65 +++++++------- .../location/2020-11-19/paginators-1.json | 1 + .../apis/mediapackage/2017-10-12/api-2.json | 6 +- .../apis/mediapackage/2017-10-12/docs-2.json | 4 +- models/apis/rds/2014-10-31/docs-2.json | 2 +- models/apis/redshift/2012-12-01/api-2.json | 3 +- models/apis/redshift/2012-12-01/docs-2.json | 15 ++-- .../apis/securityhub/2018-10-26/docs-2.json | 4 +- models/endpoints/endpoints.json | 26 +++--- service/ec2/api.go | 36 ++++++++ service/locationservice/api.go | 86 +++++++++++++++---- service/mediapackage/api.go | 13 ++- service/rds/api.go | 7 -- service/redshift/api.go | 34 +++++--- service/securityhub/api.go | 17 ++-- service/securityhub/doc.go | 7 +- 21 files changed, 287 insertions(+), 140 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 56c3175502a..ea1f67d346e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,18 @@ +Release v1.44.9 (2022-05-06) +=== + +### Service Client Updates +* `service/ec2`: Updates service API + * Add new state values for IPAMs, IPAM Scopes, and IPAM Pools. +* `service/location`: Updates service API, documentation, and paginators +* `service/mediapackage`: Updates service API and documentation + * This release adds Dvb Dash 2014 as an available profile option for Dash Origin Endpoints. +* `service/rds`: Updates service API, documentation, waiters, paginators, and examples + * Various documentation improvements. +* `service/redshift`: Updates service API and documentation + * Introduces new field 'LoadSampleData' in CreateCluster operation. Customers can now specify 'LoadSampleData' option during creation of a cluster, which results in loading of sample data in the cluster that is created. 
+* `service/securityhub`: Updates service documentation + Release v1.44.8 (2022-05-05) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 3133e1192ab..3acb1ffa991 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -6899,7 +6899,7 @@ var awsPartition = partition{ Region: "ap-south-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.ap-south-1.aws", + Hostname: "ec2.ap-south-1.api.aws", }, endpointKey{ Region: "ap-southeast-1", @@ -6935,7 +6935,7 @@ var awsPartition = partition{ Region: "eu-west-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.eu-west-1.aws", + Hostname: "ec2.eu-west-1.api.aws", }, endpointKey{ Region: "eu-west-2", @@ -6998,7 +6998,7 @@ var awsPartition = partition{ Region: "sa-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.sa-east-1.aws", + Hostname: "ec2.sa-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -7007,7 +7007,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.us-east-1.aws", + Hostname: "ec2.us-east-1.api.aws", }, endpointKey{ Region: "us-east-1", @@ -7022,7 +7022,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.us-east-2.aws", + Hostname: "ec2.us-east-2.api.aws", }, endpointKey{ Region: "us-east-2", @@ -7046,7 +7046,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: dualStackVariant, }: endpoint{ - Hostname: "api.ec2.us-west-2.aws", + Hostname: "ec2.us-west-2.api.aws", }, endpointKey{ Region: "us-west-2", @@ -10588,6 +10588,9 @@ var awsPartition = partition{ }, "identity-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -13758,6 +13761,9 @@ var awsPartition = partition{ }, "messaging-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: 
endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, diff --git a/aws/version.go b/aws/version.go index 02cc10e9d01..0f5a41d58dd 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.8" +const SDKVersion = "1.44.9" diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index 11ab3612b8d..e2797bac76b 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -24932,7 +24932,10 @@ "modify-failed", "delete-in-progress", "delete-complete", - "delete-failed" + "delete-failed", + "isolate-in-progress", + "isolate-complete", + "restore-in-progress" ] }, "IpamResourceCidr":{ @@ -25105,7 +25108,10 @@ "modify-failed", "delete-in-progress", "delete-complete", - "delete-failed" + "delete-failed", + "isolate-in-progress", + "isolate-complete", + "restore-in-progress" ] }, "IpamScopeType":{ @@ -25133,7 +25139,10 @@ "modify-failed", "delete-in-progress", "delete-complete", - "delete-failed" + "delete-failed", + "isolate-in-progress", + "isolate-complete", + "restore-in-progress" ] }, "Ipv4PoolCoipId":{"type":"string"}, diff --git a/models/apis/location/2020-11-19/api-2.json b/models/apis/location/2020-11-19/api-2.json index 2f24ba2b95c..b5ffdf56ddf 100644 --- a/models/apis/location/2020-11-19/api-2.json +++ b/models/apis/location/2020-11-19/api-2.json @@ -2543,9 +2543,16 @@ "location":"uri", "locationName":"CollectionName" }, + "MaxResults":{"shape":"ListGeofencesRequestMaxResultsInteger"}, "NextToken":{"shape":"Token"} } }, + "ListGeofencesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, "ListGeofencesResponse":{ "type":"structure", "required":["Entries"], @@ -3103,7 +3110,7 @@ }, "Language":{"shape":"LanguageTag"}, "MaxResults":{"shape":"SearchPlaceIndexForSuggestionsRequestMaxResultsInteger"}, - 
"Text":{"shape":"SyntheticSearchPlaceIndexForSuggestionsRequestString"} + "Text":{"shape":"SearchPlaceIndexForSuggestionsRequestTextString"} } }, "SearchPlaceIndexForSuggestionsRequestMaxResultsInteger":{ @@ -3112,6 +3119,12 @@ "max":15, "min":1 }, + "SearchPlaceIndexForSuggestionsRequestTextString":{ + "type":"string", + "max":200, + "min":1, + "sensitive":true + }, "SearchPlaceIndexForSuggestionsResponse":{ "type":"structure", "required":[ @@ -3136,7 +3149,7 @@ "FilterCountries":{"shape":"CountryCodeList"}, "Language":{"shape":"LanguageTag"}, "MaxResults":{"shape":"Integer"}, - "Text":{"shape":"SyntheticSearchPlaceIndexForSuggestionsSummaryString"} + "Text":{"shape":"SensitiveString"} } }, "SearchPlaceIndexForTextRequest":{ @@ -3156,9 +3169,15 @@ }, "Language":{"shape":"LanguageTag"}, "MaxResults":{"shape":"PlaceIndexSearchResultLimit"}, - "Text":{"shape":"SyntheticSearchPlaceIndexForTextRequestString"} + "Text":{"shape":"SearchPlaceIndexForTextRequestTextString"} } }, + "SearchPlaceIndexForTextRequestTextString":{ + "type":"string", + "max":200, + "min":1, + "sensitive":true + }, "SearchPlaceIndexForTextResponse":{ "type":"structure", "required":[ @@ -3184,9 +3203,13 @@ "Language":{"shape":"LanguageTag"}, "MaxResults":{"shape":"PlaceIndexSearchResultLimit"}, "ResultBBox":{"shape":"BoundingBox"}, - "Text":{"shape":"SyntheticSearchPlaceIndexForTextSummaryString"} + "Text":{"shape":"SensitiveString"} } }, + "SensitiveString":{ + "type":"string", + "sensitive":true + }, "ServiceQuotaExceededException":{ "type":"structure", "required":["Message"], @@ -3238,26 +3261,6 @@ "member":{"shape":"Step"} }, "String":{"type":"string"}, - "SyntheticSearchPlaceIndexForSuggestionsRequestString":{ - "type":"string", - "max":200, - "min":1, - "sensitive":true - }, - "SyntheticSearchPlaceIndexForSuggestionsSummaryString":{ - "type":"string", - "sensitive":true - }, - "SyntheticSearchPlaceIndexForTextRequestString":{ - "type":"string", - "max":200, - "min":1, - "sensitive":true - }, 
- "SyntheticSearchPlaceIndexForTextSummaryString":{ - "type":"string", - "sensitive":true - }, "TagKey":{ "type":"string", "max":128, diff --git a/models/apis/location/2020-11-19/docs-2.json b/models/apis/location/2020-11-19/docs-2.json index dd4feaa04ab..45f7b7fb4ab 100644 --- a/models/apis/location/2020-11-19/docs-2.json +++ b/models/apis/location/2020-11-19/docs-2.json @@ -5,7 +5,7 @@ "AssociateTrackerConsumer": "

Creates an association between a geofence collection and a tracker resource. This allows the tracker resource to communicate location data to the linked geofence collection.

You can associate up to five geofence collections to each tracker resource.

Currently not supported — Cross-account configurations, such as creating associations between a tracker resource in one account and a geofence collection in another account.

", "BatchDeleteDevicePositionHistory": "

Deletes the position history of one or more devices from a tracker resource.

", "BatchDeleteGeofence": "

Deletes a batch of geofences from a geofence collection.

This operation deletes the resource permanently.

", - "BatchEvaluateGeofences": "

Evaluates device positions against the geofence geometries from a given geofence collection.

This operation always returns an empty response because geofences are asynchronously evaluated. The evaluation determines if the device has entered or exited a geofenced area, and then publishes one of the following events to Amazon EventBridge:

The last geofence that a device was observed within is tracked for 30 days after the most recent device position update.

Geofence evaluation uses the given device position. It does not account for the optional Accuracy of a DevicePositionUpdate.

", + "BatchEvaluateGeofences": "

Evaluates device positions against the geofence geometries from a given geofence collection.

This operation always returns an empty response because geofences are asynchronously evaluated. The evaluation determines if the device has entered or exited a geofenced area, and then publishes one of the following events to Amazon EventBridge:

The last geofence that a device was observed within is tracked for 30 days after the most recent device position update.

Geofence evaluation uses the given device position. It does not account for the optional Accuracy of a DevicePositionUpdate.

The DeviceID is used as a string to represent the device. You do not need to have a Tracker associated with the DeviceID.

", "BatchGetDevicePosition": "

Lists the latest device positions for requested devices.

", "BatchPutGeofence": "

A batch request for storing geofence geometries into a given geofence collection, or updates the geometry of an existing geofence if a geofence ID is included in the request.

", "BatchUpdateDevicePosition": "

Uploads position update data for one or more devices to a tracker resource. Amazon Location uses the data when it reports the last known device position and position history. Amazon Location retains location data for 30 days.

Position updates are handled based on the PositionFiltering property of the tracker. When PositionFiltering is set to TimeBased, updates are evaluated against linked geofence collections, and location data is stored at a maximum of one position per 30 second interval. If your update frequency is more often than every 30 seconds, only one update per 30 seconds is stored for each unique device ID.

When PositionFiltering is set to DistanceBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than 30 m (98.4 ft).

When PositionFiltering is set to AccuracyBased filtering, location data is stored and evaluated against linked geofence collections only if the device has moved more than the measured accuracy. For example, if two consecutive updates from a device have a horizontal accuracy of 5 m and 10 m, the second update is neither stored or evaluated if the device has moved less than 15 m. If PositionFiltering is set to AccuracyBased filtering, Amazon Location uses the default value { \"Horizontal\": 0} when accuracy is not provided on a DevicePositionUpdate.

", @@ -847,11 +847,11 @@ "LanguageTag": { "base": null, "refs": { - "SearchPlaceIndexForPositionRequest$Language": "

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results. It does not change which results are returned. If the language is not specified, or not supported for a particular result, the partner automatically chooses a language for the result.

", + "SearchPlaceIndexForPositionRequest$Language": "

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for a location around Athens, Greece, with the language parameter set to en. The city in the results will most likely be returned as Athens.

If you set the language parameter to el, for Greek, then the city in the results will more likely be returned as Αθήνα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

", "SearchPlaceIndexForPositionSummary$Language": "

The preferred language used to return results. Matches the language in the request. The value is a valid BCP 47 language tag, for example, en for English.

", - "SearchPlaceIndexForSuggestionsRequest$Language": "

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results. It does not change which results are returned. If the language is not specified, or not supported for a particular result, the partner automatically chooses a language for the result.

Used only when the partner selected is Here.

", + "SearchPlaceIndexForSuggestionsRequest$Language": "

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for Athens, Gr to get suggestions with the language parameter set to en. The results found will most likely be returned as Athens, Greece.

If you set the language parameter to el, for Greek, then the result found will more likely be returned as Αθήνα, Ελλάδα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

", "SearchPlaceIndexForSuggestionsSummary$Language": "

The preferred language used to return results. Matches the language in the request. The value is a valid BCP 47 language tag, for example, en for English.

", - "SearchPlaceIndexForTextRequest$Language": "

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results. It does not change which results are returned. If the language is not specified, or not supported for a particular result, the partner automatically chooses a language for the result.

", + "SearchPlaceIndexForTextRequest$Language": "

The preferred language used to return results. The value must be a valid BCP 47 language tag, for example, en for English.

This setting affects the languages used in the results, but not the results themselves. If no language is specified, or not supported for a particular result, the partner automatically chooses a language for the result.

For an example, we'll use the Greek language. You search for Athens, Greece, with the language parameter set to en. The result found will most likely be returned as Athens.

If you set the language parameter to el, for Greek, then the result found will more likely be returned as Αθήνα.

If the data provider does not have a value for Greek, the result will be in a language that the provider does support.

", "SearchPlaceIndexForTextSummary$Language": "

The preferred language used to return results. Matches the language in the request. The value is a valid BCP 47 language tag, for example, en for English.

" } }, @@ -976,6 +976,12 @@ "refs": { } }, + "ListGeofencesRequestMaxResultsInteger": { + "base": null, + "refs": { + "ListGeofencesRequest$MaxResults": "

An optional limit for the number of geofences returned in a single call.

Default value: 100

" + } + }, "ListGeofencesResponse": { "base": null, "refs": { @@ -1133,7 +1139,7 @@ } }, "Place": { - "base": "

Contains details about addresses or points of interest that match the search criteria.

", + "base": "

Contains details about addresses or points of interest that match the search criteria.

Not all details are included with all responses. Some details may only be returned by specific data partners.

", "refs": { "SearchForPositionResult$Place": "

Details about the search result, such as its address and position.

", "SearchForTextResult$Place": "

Details about the search result, such as its address and position.

" @@ -1161,8 +1167,8 @@ "CalculateRouteMatrixRequestDestinationPositionsList$member": null, "CalculateRouteMatrixResponseSnappedDeparturePositionsList$member": null, "CalculateRouteMatrixResponseSnappedDestinationPositionsList$member": null, - "CalculateRouteRequest$DeparturePosition": "

The start position for the route. Defined in WGS 84 format: [longitude, latitude].

If you specify a departure that's not located on a road, Amazon Location moves the position to the nearest road. If Esri is the provider for your route calculator, specifying a route that is longer than 400 km returns a 400 RoutesValidationException error.

Valid Values: [-180 to 180,-90 to 90]

", - "CalculateRouteRequest$DestinationPosition": "

The finish position for the route. Defined in WGS 84 format: [longitude, latitude].

If you specify a destination that's not located on a road, Amazon Location moves the position to the nearest road.

Valid Values: [-180 to 180,-90 to 90]

", + "CalculateRouteRequest$DeparturePosition": "

The start position for the route. Defined in World Geodetic System (WGS 84) format: [longitude, latitude].

If you specify a departure that's not located on a road, Amazon Location moves the position to the nearest road. If Esri is the provider for your route calculator, specifying a route that is longer than 400 km returns a 400 RoutesValidationException error.

Valid Values: [-180 to 180,-90 to 90]

", + "CalculateRouteRequest$DestinationPosition": "

The finish position for the route. Defined in World Geodetic System (WGS 84) format: [longitude, latitude].

If you specify a destination that's not located on a road, Amazon Location moves the position to the nearest road.

Valid Values: [-180 to 180,-90 to 90]

", "CalculateRouteRequestWaypointPositionsList$member": null, "DevicePosition$Position": "

The last known device position.

", "DevicePositionUpdate$Position": "

The latest device position defined in WGS 84 format: [X or longitude, Y or latitude].

", @@ -1447,7 +1453,7 @@ "SearchForTextResultList": { "base": null, "refs": { - "SearchPlaceIndexForTextResponse$Results": "

A list of Places matching the input text. Each result contains additional information about the specific point of interest.

" + "SearchPlaceIndexForTextResponse$Results": "

A list of Places matching the input text. Each result contains additional information about the specific point of interest.

Not all response properties are included with all responses. Some properties may only be returned by specific data partners.

" } }, "SearchForTextResultRelevanceDouble": { @@ -1483,6 +1489,12 @@ "SearchPlaceIndexForSuggestionsRequest$MaxResults": "

An optional parameter. The maximum number of results returned per request.

The default: 5

" } }, + "SearchPlaceIndexForSuggestionsRequestTextString": { + "base": null, + "refs": { + "SearchPlaceIndexForSuggestionsRequest$Text": "

The free-form partial text to use to generate place suggestions. For example, eiffel tow.

" + } + }, "SearchPlaceIndexForSuggestionsResponse": { "base": null, "refs": { @@ -1499,6 +1511,12 @@ "refs": { } }, + "SearchPlaceIndexForTextRequestTextString": { + "base": null, + "refs": { + "SearchPlaceIndexForTextRequest$Text": "

The address, name, city, or region to be used in the search in free-form text format. For example, 123 Any Street.

" + } + }, "SearchPlaceIndexForTextResponse": { "base": null, "refs": { @@ -1510,6 +1528,13 @@ "SearchPlaceIndexForTextResponse$Summary": "

Contains a summary of the request. Echoes the input values for BiasPosition, FilterBBox, FilterCountries, Language, MaxResults, and Text. Also includes the DataSource of the place index and the bounding box, ResultBBox, which surrounds the search results.

" } }, + "SensitiveString": { + "base": null, + "refs": { + "SearchPlaceIndexForSuggestionsSummary$Text": "

The free-form partial text input specified in the request.

", + "SearchPlaceIndexForTextSummary$Text": "

The search text specified in the request.

" + } + }, "ServiceQuotaExceededException": { "base": "

The operation was denied because the request would exceed the maximum quota set for Amazon Location Service.

", "refs": { @@ -1601,30 +1626,6 @@ "ValidationExceptionField$Name": "

The field name where the invalid entry was detected.

" } }, - "SyntheticSearchPlaceIndexForSuggestionsRequestString": { - "base": null, - "refs": { - "SearchPlaceIndexForSuggestionsRequest$Text": "

The free-form partial text to use to generate place suggestions. For example, eiffel tow.

" - } - }, - "SyntheticSearchPlaceIndexForSuggestionsSummaryString": { - "base": null, - "refs": { - "SearchPlaceIndexForSuggestionsSummary$Text": "

The free-form partial text input specified in the request.

" - } - }, - "SyntheticSearchPlaceIndexForTextRequestString": { - "base": null, - "refs": { - "SearchPlaceIndexForTextRequest$Text": "

The address, name, city, or region to be used in the search in free-form text format. For example, 123 Any Street.

" - } - }, - "SyntheticSearchPlaceIndexForTextSummaryString": { - "base": null, - "refs": { - "SearchPlaceIndexForTextSummary$Text": "

The search text specified in the request.

" - } - }, "TagKey": { "base": null, "refs": { diff --git a/models/apis/location/2020-11-19/paginators-1.json b/models/apis/location/2020-11-19/paginators-1.json index 26dfd677ec0..dac8d15010e 100644 --- a/models/apis/location/2020-11-19/paginators-1.json +++ b/models/apis/location/2020-11-19/paginators-1.json @@ -21,6 +21,7 @@ "ListGeofences": { "input_token": "NextToken", "output_token": "NextToken", + "limit_key": "MaxResults", "result_key": "Entries" }, "ListMaps": { diff --git a/models/apis/mediapackage/2017-10-12/api-2.json b/models/apis/mediapackage/2017-10-12/api-2.json index 23d4a1bd8bb..788a2edb211 100644 --- a/models/apis/mediapackage/2017-10-12/api-2.json +++ b/models/apis/mediapackage/2017-10-12/api-2.json @@ -2164,7 +2164,8 @@ "enum": [ "NONE", "HBBTV_1_5", - "HYBRIDCAST" + "HYBRIDCAST", + "DVB_DASH_2014" ], "type": "string" }, @@ -2643,7 +2644,8 @@ "enum": [ "NONE", "HTTP-HEAD", - "HTTP-ISO" + "HTTP-ISO", + "HTTP-XSDATE" ], "type": "string" }, diff --git a/models/apis/mediapackage/2017-10-12/docs-2.json b/models/apis/mediapackage/2017-10-12/docs-2.json index 866fc660b75..346f1910365 100644 --- a/models/apis/mediapackage/2017-10-12/docs-2.json +++ b/models/apis/mediapackage/2017-10-12/docs-2.json @@ -258,7 +258,7 @@ "Profile" : { "base" : null, "refs" : { - "DashPackage$Profile" : "The Dynamic Adaptive Streaming over HTTP (DASH) profile type. When set to \"HBBTV_1_5\", HbbTV 1.5 compliant output is enabled." + "DashPackage$Profile" : "The Dynamic Adaptive Streaming over HTTP (DASH) profile type. When set to \"HBBTV_1_5\", HbbTV 1.5 compliant output is enabled. When set to \"DVB-DASH_2014\", DVB-DASH 2014 compliant output is enabled." } }, "S3Destination" : { @@ -451,7 +451,7 @@ "CmafEncryption$ConstantInitializationVector" : "An optional 128-bit, 16-byte hex value represented by a 32-character string, used in conjunction with the key for encrypting blocks. 
If you don't specify a value, then MediaPackage creates the constant initialization vector (IV).", "CmafPackage$SegmentPrefix" : "An optional custom string that is prepended to the name of each segment. If not specified, it defaults to the ChannelId.", "CmafPackageCreateOrUpdateParameters$SegmentPrefix" : "An optional custom string that is prepended to the name of each segment. If not specified, it defaults to the ChannelId.", - "DashPackage$UtcTimingUri" : "Specifies the value attribute of the UTCTiming field when utcTiming is set to HTTP-ISO or HTTP-HEAD", + "DashPackage$UtcTimingUri" : "Specifies the value attribute of the UTCTiming field when utcTiming is set to HTTP-ISO, HTTP-HEAD or HTTP-XSDATE", "EgressAccessLogs$LogGroupName" : "Customize the log group name.", "HarvestJob$Arn" : "The Amazon Resource Name (ARN) assigned to the HarvestJob.\n", "HarvestJob$ChannelId" : "The ID of the Channel that the HarvestJob will harvest from.\n", diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 8ea0a201c7e..b71f11e0f03 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -3771,7 +3771,7 @@ "CreateDBInstanceMessage$Engine": "

The name of the database engine to be used for this instance.

Not every database engine is available for every Amazon Web Services Region.

Valid Values:

", "CreateDBInstanceMessage$MasterUsername": "

The name for the master user.

Amazon Aurora

Not applicable. The name for the master user is managed by the DB cluster.

Amazon RDS

Constraints:

", "CreateDBInstanceMessage$MasterUserPassword": "

The password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".

Amazon Aurora

Not applicable. The password for the master user is managed by the DB cluster.

MariaDB

Constraints: Must contain from 8 to 41 characters.

Microsoft SQL Server

Constraints: Must contain from 8 to 128 characters.

MySQL

Constraints: Must contain from 8 to 41 characters.

Oracle

Constraints: Must contain from 8 to 30 characters.

PostgreSQL

Constraints: Must contain from 8 to 128 characters.

", - "CreateDBInstanceMessage$AvailabilityZone": "

The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones.

Amazon Aurora

Each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.

Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint.

If you're creating a DB instance in an RDS on VMware environment, specify the identifier of the custom Availability Zone to create the DB instance in.

For more information about RDS on VMware, see the RDS on VMware User Guide.

", + "CreateDBInstanceMessage$AvailabilityZone": "

The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones.

Amazon Aurora

Each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.

Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.

Example: us-east-1d

Constraint: The AvailabilityZone parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint.

", "CreateDBInstanceMessage$DBSubnetGroupName": "

A DB subnet group to associate with this DB instance.

Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.

Example: mydbsubnetgroup

", "CreateDBInstanceMessage$PreferredMaintenanceWindow": "

The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). For more information, see Amazon RDS Maintenance Window.

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

", "CreateDBInstanceMessage$DBParameterGroupName": "

The name of the DB parameter group to associate with this DB instance. If you do not specify a value, then the default DB parameter group for the specified DB engine and version is used.

This setting doesn't apply to RDS Custom.

Constraints:

", diff --git a/models/apis/redshift/2012-12-01/api-2.json b/models/apis/redshift/2012-12-01/api-2.json index e356e7eee1c..7cdeecd6d7a 100644 --- a/models/apis/redshift/2012-12-01/api-2.json +++ b/models/apis/redshift/2012-12-01/api-2.json @@ -3006,7 +3006,8 @@ "SnapshotScheduleIdentifier":{"shape":"String"}, "AvailabilityZoneRelocation":{"shape":"BooleanOptional"}, "AquaConfigurationStatus":{"shape":"AquaConfigurationStatus"}, - "DefaultIamRoleArn":{"shape":"String"} + "DefaultIamRoleArn":{"shape":"String"}, + "LoadSampleData":{"shape":"String"} } }, "CreateClusterParameterGroupMessage":{ diff --git a/models/apis/redshift/2012-12-01/docs-2.json b/models/apis/redshift/2012-12-01/docs-2.json index b778cf464cc..a0c8d0e67f4 100644 --- a/models/apis/redshift/2012-12-01/docs-2.json +++ b/models/apis/redshift/2012-12-01/docs-2.json @@ -6,7 +6,7 @@ "AddPartner": "

Adds a partner integration to a cluster. This operation authorizes a partner to push status updates for the specified database. To complete the integration, you also set up the integration on the partner website.

", "AssociateDataShareConsumer": "

From a datashare consumer account, associates a datashare with the account (AssociateEntireAccount) or the specified namespace (ConsumerArn). If you make this association, the consumer can consume the datashare.

", "AuthorizeClusterSecurityGroupIngress": "

Adds an inbound (ingress) rule to an Amazon Redshift security group. Depending on whether the application accessing your cluster is running on the Internet or an Amazon EC2 instance, you can authorize inbound access to either a Classless Interdomain Routing (CIDR)/Internet Protocol (IP) range or to an Amazon EC2 security group. You can add as many as 20 ingress rules to an Amazon Redshift security group.

If you authorize access to an Amazon EC2 security group, specify EC2SecurityGroupName and EC2SecurityGroupOwnerId. The Amazon EC2 security group and Amazon Redshift cluster must be in the same Amazon Web Services Region.

If you authorize access to a CIDR/IP address range, specify CIDRIP. For an overview of CIDR blocks, see the Wikipedia article on Classless Inter-Domain Routing.

You must also associate the security group with a cluster so that clients running on these IP addresses or the EC2 instance are authorized to connect to the cluster. For information about managing security groups, go to Working with Security Groups in the Amazon Redshift Cluster Management Guide.

", - "AuthorizeDataShare": "

From a data producer account, authorizes the sharing of a datashare with one or more consumer accounts or managing entities. To authorize a datashare for a data consumer, the producer account must have the correct access privileges.

", + "AuthorizeDataShare": "

From a data producer account, authorizes the sharing of a datashare with one or more consumer accounts or managing entities. To authorize a datashare for a data consumer, the producer account must have the correct access permissions.

", "AuthorizeEndpointAccess": "

Grants access to a cluster.

", "AuthorizeSnapshotAccess": "

Authorizes the specified Amazon Web Services account to restore the specified snapshot.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

", "BatchDeleteClusterSnapshots": "

Deletes a set of cluster snapshots.

", @@ -28,7 +28,7 @@ "CreateSnapshotSchedule": "

Create a snapshot schedule that can be associated to a cluster and which overrides the default system backup schedule.

", "CreateTags": "

Adds tags to a cluster.

A resource can have up to 50 tags. If you try to create more than 50 tags for a resource, you will receive an error and the attempt will fail.

If you specify a key that already exists for the resource, the value for that key will be updated with the new value.

", "CreateUsageLimit": "

Creates a usage limit for a specified Amazon Redshift feature on a cluster. The usage limit is identified by the returned usage limit identifier.

", - "DeauthorizeDataShare": "

From the producer account, removes authorization from the specified datashare.

", + "DeauthorizeDataShare": "

From a datashare producer account, removes authorization from the specified datashare.

", "DeleteAuthenticationProfile": "

Deletes an authentication profile.

", "DeleteCluster": "

Deletes a previously provisioned cluster without its final snapshot being created. A successful response from the web service indicates that the request was received correctly. Use DescribeClusters to monitor the status of the deletion. The delete operation cannot be canceled or reverted once submitted. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

If you want to shut down the cluster and retain it for future use, set SkipFinalClusterSnapshot to false and specify a name for FinalClusterSnapshotIdentifier. You can later restore this snapshot to resume using the cluster. If a final cluster snapshot is requested, the status of the cluster will be \"final-snapshot\" while the snapshot is being taken, then it's \"deleting\" once Amazon Redshift begins deleting the cluster.

For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

", "DeleteClusterParameterGroup": "

Deletes a specified Amazon Redshift parameter group.

You cannot delete a parameter group if it is associated with a cluster.

", @@ -84,10 +84,10 @@ "DescribeUsageLimits": "

Shows usage limits on a cluster. Results are filtered based on the combination of input usage limit identifier, cluster identifier, and feature type parameters:

", "DisableLogging": "

Stops logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

", "DisableSnapshotCopy": "

Disables the automatic copying of snapshots from one region to another region for a specified cluster.

If your cluster and its snapshots are encrypted using an encrypted symmetric key from Key Management Service, use DeleteSnapshotCopyGrant to delete the grant that grants Amazon Redshift permission to the key in the destination region.

", - "DisassociateDataShareConsumer": "

From a consumer account, remove association for the specified datashare.

", + "DisassociateDataShareConsumer": "

From a datashare consumer account, remove association for the specified datashare.

", "EnableLogging": "

Starts logging information, such as queries and connection attempts, for the specified Amazon Redshift cluster.

", "EnableSnapshotCopy": "

Enables the automatic copy of snapshots from one region to another region for a specified cluster.

", - "GetClusterCredentials": "

Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate is True. You can optionally specify one or more database user groups that the user will join at log on. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Using IAM Authentication to Generate Database User Credentials in the Amazon Redshift Cluster Management Guide.

The Identity and Access Management (IAM) user or role that runs GetClusterCredentials must have an IAM policy attached that allows access to all necessary actions and resources. For more information about permissions, see Resource Policies for GetClusterCredentials in the Amazon Redshift Cluster Management Guide.

If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup action with access to the listed dbgroups.

In addition, if the AutoCreate parameter is set to True, then the policy must include the redshift:CreateClusterUser privilege.

If the DbName parameter is specified, the IAM policy must allow access to the resource dbname for the specified database name.

", + "GetClusterCredentials": "

Returns a database user name and temporary password with temporary authorization to log on to an Amazon Redshift database. The action returns the database user name prefixed with IAM: if AutoCreate is False or IAMA: if AutoCreate is True. You can optionally specify one or more database user groups that the user will join at log on. By default, the temporary credentials expire in 900 seconds. You can optionally specify a duration between 900 seconds (15 minutes) and 3600 seconds (60 minutes). For more information, see Using IAM Authentication to Generate Database User Credentials in the Amazon Redshift Cluster Management Guide.

The Identity and Access Management (IAM) user or role that runs GetClusterCredentials must have an IAM policy attached that allows access to all necessary actions and resources. For more information about permissions, see Resource Policies for GetClusterCredentials in the Amazon Redshift Cluster Management Guide.

If the DbGroups parameter is specified, the IAM policy must allow the redshift:JoinGroup action with access to the listed dbgroups.

In addition, if the AutoCreate parameter is set to True, then the policy must include the redshift:CreateClusterUser permission.

If the DbName parameter is specified, the IAM policy must allow access to the resource dbname for the specified database name.

", "GetReservedNodeExchangeConfigurationOptions": "

Gets the configuration options for the reserved-node exchange. These options include information about the source reserved node and target reserved node offering. Details include the node type, the price, the node count, and the offering type.

", "GetReservedNodeExchangeOfferings": "

Returns an array of DC2 ReservedNodeOfferings that matches the payment type, term, and usage price of the given DC1 reserved node.

", "ModifyAquaConfiguration": "

Modifies whether a cluster can use AQUA (Advanced Query Accelerator).

", @@ -109,7 +109,7 @@ "PauseCluster": "

Pauses a cluster.

", "PurchaseReservedNodeOffering": "

Allows you to purchase reserved nodes. Amazon Redshift offers a predefined set of reserved node offerings. You can purchase one or more of the offerings. You can call the DescribeReservedNodeOfferings API to obtain the available reserved node offerings. You can call this API by providing a specific reserved node offering and the number of nodes you want to reserve.

For more information about reserved node offerings, go to Purchasing Reserved Nodes in the Amazon Redshift Cluster Management Guide.

", "RebootCluster": "

Reboots a cluster. This action is taken as soon as possible. It results in a momentary outage to the cluster, during which the cluster status is set to rebooting. A cluster event is created when the reboot is completed. Any pending cluster modifications (see ModifyCluster) are applied at this reboot. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide.

", - "RejectDataShare": "

From the consumer account, rejects the specified datashare.

", + "RejectDataShare": "

From a datashare consumer account, rejects the specified datashare.

", "ResetClusterParameterGroup": "

Sets one or more parameters of the specified parameter group to their default values and sets the source values of the parameters to \"engine-default\". To reset the entire parameter group specify the ResetAllParameters parameter. For parameter changes to take effect you must reboot any associated clusters.

", "ResizeCluster": "

Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method.

Elastic resize operations have the following restrictions:

", "RestoreFromClusterSnapshot": "

Creates a new cluster from a snapshot. By default, Amazon Redshift creates the resulting cluster with the same configuration as the original cluster from which the snapshot was created, except that the new cluster is created with the default cluster security and parameter groups. After Amazon Redshift creates the cluster, you can use the ModifyCluster API to associate a different security group and different parameter group with the restored cluster. If you are using a DS node type, you can also choose to change to another DS node type of the same size during restore.

If you restore a cluster into a VPC, you must provide a cluster subnet group where you want the cluster restored.

For more information about working with snapshots, go to Amazon Redshift Snapshots in the Amazon Redshift Cluster Management Guide.

", @@ -902,7 +902,7 @@ "DataShareAssociationList": { "base": null, "refs": { - "DataShare$DataShareAssociations": "

A value that specifies when the datashare has an association between a producer and data consumers.

" + "DataShare$DataShareAssociations": "

A value that specifies when the datashare has an association between producer and data consumers.

" } }, "DataShareList": { @@ -2951,7 +2951,7 @@ "Cluster$ClusterNamespaceArn": "

The namespace Amazon Resource Name (ARN) of the cluster.

", "Cluster$DefaultIamRoleArn": "

The Amazon Resource Name (ARN) for the IAM role set as default for the cluster.

", "ClusterAssociatedToSchedule$ClusterIdentifier": "

", - "ClusterCredentials$DbUser": "

A database user name that is authorized to log on to the database DbName using the password DbPassword. If the specified DbUser exists in the database, the new user name has the same database privileges as the the user named in DbUser. By default, the user is added to PUBLIC. If the DbGroups parameter is specifed, DbUser is added to the listed groups for any sessions created using these credentials.

", + "ClusterCredentials$DbUser": "

A database user name that is authorized to log on to the database DbName using the password DbPassword. If the specified DbUser exists in the database, the new user name has the same database permissions as the user named in DbUser. By default, the user is added to PUBLIC. If the DbGroups parameter is specified, DbUser is added to the listed groups for any sessions created using these credentials.

", "ClusterDbRevision$ClusterIdentifier": "

The unique identifier of the cluster.

", "ClusterDbRevision$CurrentDatabaseRevision": "

A string representing the current cluster version.

", "ClusterDbRevisionsMessage$Marker": "

A string representing the starting point for the next set of revisions. If a value is returned in a response, you can retrieve the next set of revisions by providing the value in the marker parameter and retrying the command. If the marker field is empty, all revisions have already been returned.

", @@ -3014,6 +3014,7 @@ "CreateClusterMessage$MaintenanceTrackName": "

An optional parameter for the name of the maintenance track for the cluster. If you don't provide a maintenance track name, the cluster is assigned to the current track.

", "CreateClusterMessage$SnapshotScheduleIdentifier": "

A unique identifier for the snapshot schedule.

", "CreateClusterMessage$DefaultIamRoleArn": "

The Amazon Resource Name (ARN) for the IAM role that was set as default for the cluster when the cluster was created.

", + "CreateClusterMessage$LoadSampleData": "

A flag that specifies whether to load sample data once the cluster is created.

", "CreateClusterParameterGroupMessage$ParameterGroupName": "

The name of the cluster parameter group.

Constraints:

This value is stored as a lower-case string.

", "CreateClusterParameterGroupMessage$ParameterGroupFamily": "

The Amazon Redshift engine version to which the cluster parameter group applies. The cluster engine version determines the set of parameters.

To get a list of valid parameter group family names, you can call DescribeClusterParameterGroups. By default, Amazon Redshift returns a list of all the parameter groups that are owned by your Amazon Web Services account, including the default parameter groups for each Amazon Redshift engine version. The parameter group family names associated with the default parameter groups provide you the valid values. For example, a valid family name is \"redshift-1.0\".

", "CreateClusterParameterGroupMessage$Description": "

A description of the parameter group.

", diff --git a/models/apis/securityhub/2018-10-26/docs-2.json b/models/apis/securityhub/2018-10-26/docs-2.json index 25bebe59d40..f5158c8d884 100644 --- a/models/apis/securityhub/2018-10-26/docs-2.json +++ b/models/apis/securityhub/2018-10-26/docs-2.json @@ -1,12 +1,12 @@ { "version": "2.0", - "service": "

Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from Amazon Web Services accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the Security HubUser Guide .

When you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to.

For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

The following throttling limits apply to using Security Hub API operations.

", + "service": "

Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status of your environment based on controls from supported security standards. Security Hub collects security data from Amazon Web Services accounts, services, and integrated third-party products and helps you analyze security trends in your environment to identify the highest priority security issues. For more information about Security Hub, see the Security Hub User Guide.

When you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services Region that is currently active or in the specific Amazon Web Services Region that you specify in your request. Any configuration or settings change that results from the operation is applied only to that Region. To make the same change in other Regions, execute the same command for each Region to apply the change to.

For example, if your Region is set to us-west-2, when you use CreateMembers to add a member account to Security Hub, the association of the member account with the administrator account is created only in the us-west-2 Region. Security Hub must be enabled for the member account in the same Region that the invitation was sent from.

The following throttling limits apply to using Security Hub API operations.

", "operations": { "AcceptAdministratorInvitation": "

Accepts the invitation to be a member account and be monitored by the Security Hub administrator account that the invitation was sent from.

This operation is only used by member accounts that are not added through Organizations.

When the member account accepts the invitation, permission is granted to the administrator account to view findings generated in the member account.

", "AcceptInvitation": "

This method is deprecated. Instead, use AcceptAdministratorInvitation.

The Security Hub console continues to use AcceptInvitation. It will eventually change to use AcceptAdministratorInvitation. Any IAM policies that specifically control access to this function must continue to use AcceptInvitation. You should also add AcceptAdministratorInvitation to your policies to ensure that the correct permissions are in place after the console begins to use AcceptAdministratorInvitation.

Accepts the invitation to be a member account and be monitored by the Security Hub administrator account that the invitation was sent from.

This operation is only used by member accounts that are not added through Organizations.

When the member account accepts the invitation, permission is granted to the administrator account to view findings generated in the member account.

", "BatchDisableStandards": "

Disables the standards specified by the provided StandardsSubscriptionArns.

For more information, see Security Standards section of the Security Hub User Guide.

", "BatchEnableStandards": "

Enables the standards specified by the provided StandardsArn. To obtain the ARN for a standard, use the DescribeStandards operation.

For more information, see the Security Standards section of the Security Hub User Guide.

", - "BatchImportFindings": "

Imports security findings generated by a finding provider into Security Hub. This action is requested by the finding provider to import its findings into Security Hub.

BatchImportFindings must be called by one of the following:

The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

After a finding is created, BatchImportFindings cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.

Finding providers also should not use BatchImportFindings to update the following attributes.

Instead, finding providers use FindingProviderFields to provide values for these attributes.

", + "BatchImportFindings": "

Imports security findings generated by a finding provider into Security Hub. This action is requested by the finding provider to import its findings into Security Hub.

BatchImportFindings must be called by one of the following:

The maximum allowed size for a finding is 240 Kb. An error is returned for any finding larger than 240 Kb.

After a finding is created, BatchImportFindings cannot be used to update the following finding fields and objects, which Security Hub customers use to manage their investigation workflow.

Finding providers also should not use BatchImportFindings to update the following attributes.

Instead, finding providers use FindingProviderFields to provide values for these attributes.

", "BatchUpdateFindings": "

Used by Security Hub customers to update information about their investigation into a finding. Requested by administrator accounts or member accounts. Administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account.

Updates from BatchUpdateFindings do not affect the value of UpdatedAt for a finding.

Administrator and member accounts can use BatchUpdateFindings to update the following finding fields and objects.

You can configure IAM policies to restrict access to fields and field values. For example, you might not want member accounts to be able to suppress findings or change the finding severity. See Configuring access to BatchUpdateFindings in the Security Hub User Guide.

", "CreateActionTarget": "

Creates a custom action target in Security Hub.

You can use custom actions on findings and insights in Security Hub to trigger target actions in Amazon CloudWatch Events.

", "CreateFindingAggregator": "

Used to enable finding aggregation. Must be called from the aggregation Region.

For more details about cross-Region replication, see Configuring finding aggregation in the Security Hub User Guide.

", diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index a643539940d..84b4c9e6f5d 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -3950,7 +3950,7 @@ "ap-northeast-3" : { }, "ap-south-1" : { "variants" : [ { - "hostname" : "api.ec2.ap-south-1.aws", + "hostname" : "ec2.ap-south-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -3968,7 +3968,7 @@ "eu-south-1" : { }, "eu-west-1" : { "variants" : [ { - "hostname" : "api.ec2.eu-west-1.aws", + "hostname" : "ec2.eu-west-1.api.aws", "tags" : [ "dualstack" ] } ] }, @@ -4012,26 +4012,26 @@ "me-south-1" : { }, "sa-east-1" : { "variants" : [ { - "hostname" : "api.ec2.sa-east-1.aws", + "hostname" : "ec2.sa-east-1.api.aws", "tags" : [ "dualstack" ] } ] }, "us-east-1" : { "variants" : [ { - "hostname" : "api.ec2.us-east-1.aws", - "tags" : [ "dualstack" ] - }, { "hostname" : "ec2-fips.us-east-1.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ec2.us-east-1.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-east-2" : { "variants" : [ { - "hostname" : "api.ec2.us-east-2.aws", - "tags" : [ "dualstack" ] - }, { "hostname" : "ec2-fips.us-east-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ec2.us-east-2.api.aws", + "tags" : [ "dualstack" ] } ] }, "us-west-1" : { @@ -4042,11 +4042,11 @@ }, "us-west-2" : { "variants" : [ { - "hostname" : "api.ec2.us-west-2.aws", - "tags" : [ "dualstack" ] - }, { "hostname" : "ec2-fips.us-west-2.amazonaws.com", "tags" : [ "fips" ] + }, { + "hostname" : "ec2.us-west-2.api.aws", + "tags" : [ "dualstack" ] } ] } } @@ -6228,6 +6228,7 @@ }, "identity-chime" : { "endpoints" : { + "eu-central-1" : { }, "us-east-1" : { "variants" : [ { "hostname" : "identity-chime-fips.us-east-1.amazonaws.com", @@ -8088,6 +8089,7 @@ }, "messaging-chime" : { "endpoints" : { + "eu-central-1" : { }, "us-east-1" : { "variants" : [ { "hostname" : "messaging-chime-fips.us-east-1.amazonaws.com", diff --git a/service/ec2/api.go b/service/ec2/api.go index 
e698f89cf2c..d0abc725833 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -165470,6 +165470,15 @@ const ( // IpamPoolStateDeleteFailed is a IpamPoolState enum value IpamPoolStateDeleteFailed = "delete-failed" + + // IpamPoolStateIsolateInProgress is a IpamPoolState enum value + IpamPoolStateIsolateInProgress = "isolate-in-progress" + + // IpamPoolStateIsolateComplete is a IpamPoolState enum value + IpamPoolStateIsolateComplete = "isolate-complete" + + // IpamPoolStateRestoreInProgress is a IpamPoolState enum value + IpamPoolStateRestoreInProgress = "restore-in-progress" ) // IpamPoolState_Values returns all elements of the IpamPoolState enum @@ -165484,6 +165493,9 @@ func IpamPoolState_Values() []string { IpamPoolStateDeleteInProgress, IpamPoolStateDeleteComplete, IpamPoolStateDeleteFailed, + IpamPoolStateIsolateInProgress, + IpamPoolStateIsolateComplete, + IpamPoolStateRestoreInProgress, } } @@ -165542,6 +165554,15 @@ const ( // IpamScopeStateDeleteFailed is a IpamScopeState enum value IpamScopeStateDeleteFailed = "delete-failed" + + // IpamScopeStateIsolateInProgress is a IpamScopeState enum value + IpamScopeStateIsolateInProgress = "isolate-in-progress" + + // IpamScopeStateIsolateComplete is a IpamScopeState enum value + IpamScopeStateIsolateComplete = "isolate-complete" + + // IpamScopeStateRestoreInProgress is a IpamScopeState enum value + IpamScopeStateRestoreInProgress = "restore-in-progress" ) // IpamScopeState_Values returns all elements of the IpamScopeState enum @@ -165556,6 +165577,9 @@ func IpamScopeState_Values() []string { IpamScopeStateDeleteInProgress, IpamScopeStateDeleteComplete, IpamScopeStateDeleteFailed, + IpamScopeStateIsolateInProgress, + IpamScopeStateIsolateComplete, + IpamScopeStateRestoreInProgress, } } @@ -165602,6 +165626,15 @@ const ( // IpamStateDeleteFailed is a IpamState enum value IpamStateDeleteFailed = "delete-failed" + + // IpamStateIsolateInProgress is a IpamState enum value + IpamStateIsolateInProgress = 
"isolate-in-progress" + + // IpamStateIsolateComplete is a IpamState enum value + IpamStateIsolateComplete = "isolate-complete" + + // IpamStateRestoreInProgress is a IpamState enum value + IpamStateRestoreInProgress = "restore-in-progress" ) // IpamState_Values returns all elements of the IpamState enum @@ -165616,6 +165649,9 @@ func IpamState_Values() []string { IpamStateDeleteInProgress, IpamStateDeleteComplete, IpamStateDeleteFailed, + IpamStateIsolateInProgress, + IpamStateIsolateComplete, + IpamStateRestoreInProgress, } } diff --git a/service/locationservice/api.go b/service/locationservice/api.go index 8738610364c..bbf085c34b4 100644 --- a/service/locationservice/api.go +++ b/service/locationservice/api.go @@ -383,6 +383,9 @@ func (c *LocationService) BatchEvaluateGeofencesRequest(input *BatchEvaluateGeof // Geofence evaluation uses the given device position. It does not account for // the optional Accuracy of a DevicePositionUpdate. // +// The DeviceID is used as a string to represent the device. You do not need +// to have a Tracker associated with the DeviceID. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3619,7 +3622,7 @@ func (c *LocationService) ListGeofencesRequest(input *ListGeofencesInput) (req * Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, - LimitToken: "", + LimitToken: "MaxResults", TruncationToken: "", }, } @@ -7025,8 +7028,9 @@ type CalculateRouteInput struct { // Valid Values: false | true DepartNow *bool `type:"boolean"` - // The start position for the route. Defined in WGS 84 (https://earth-info.nga.mil/GandG/wgs84/index.html) - // format: [longitude, latitude]. + // The start position for the route. Defined in World Geodetic System (WGS 84) + // (https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84) format: [longitude, + // latitude]. 
// // * For example, [-123.115, 49.285] // @@ -7054,8 +7058,9 @@ type CalculateRouteInput struct { // format: YYYY-MM-DDThh:mm:ss.sssZ. For example, 2020–07-2T12:15:20.000Z+01:00 DepartureTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // The finish position for the route. Defined in WGS 84 (https://earth-info.nga.mil/GandG/wgs84/index.html) - // format: [longitude, latitude]. + // The finish position for the route. Defined in World Geodetic System (WGS + // 84) (https://earth-info.nga.mil/index.php?dir=wgs84&action=wgs84) format: + // [longitude, latitude]. // // * For example, [-122.339, 47.615] // @@ -12260,6 +12265,11 @@ type ListGeofencesInput struct { // CollectionName is a required field CollectionName *string `location:"uri" locationName:"CollectionName" min:"1" type:"string" required:"true"` + // An optional limit for the number of geofences returned in a single call. + // + // Default value: 100 + MaxResults *int64 `min:"1" type:"integer"` + // The pagination token specifying which page of results to return in the response. // If no token is provided, the default page is the first page. // @@ -12294,6 +12304,9 @@ func (s *ListGeofencesInput) Validate() error { if s.CollectionName != nil && len(*s.CollectionName) < 1 { invalidParams.Add(request.NewErrParamMinLen("CollectionName", 1)) } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } @@ -12310,6 +12323,12 @@ func (s *ListGeofencesInput) SetCollectionName(v string) *ListGeofencesInput { return s } +// SetMaxResults sets the MaxResults field's value. +func (s *ListGeofencesInput) SetMaxResults(v int64) *ListGeofencesInput { + s.MaxResults = &v + return s +} + // SetNextToken sets the NextToken field's value. 
func (s *ListGeofencesInput) SetNextToken(v string) *ListGeofencesInput { s.NextToken = &v @@ -13460,6 +13479,9 @@ func (s *MapConfiguration) SetStyle(v string) *MapConfiguration { // Contains details about addresses or points of interest that match the search // criteria. +// +// Not all details are included with all responses. Some details may only be +// returned by specific data partners. type Place struct { _ struct{} `type:"structure"` @@ -14180,10 +14202,19 @@ type SearchPlaceIndexForPositionInput struct { // BCP 47 (https://tools.ietf.org/search/bcp47) language tag, for example, en // for English. // - // This setting affects the languages used in the results. It does not change - // which results are returned. If the language is not specified, or not supported - // for a particular result, the partner automatically chooses a language for - // the result. + // This setting affects the languages used in the results, but not the results + // themselves. If no language is specified, or not supported for a particular + // result, the partner automatically chooses a language for the result. + // + // For an example, we'll use the Greek language. You search for a location around + // Athens, Greece, with the language parameter set to en. The city in the results + // will most likely be returned as Athens. + // + // If you set the language parameter to el, for Greek, then the city in the + // results will more likely be returned as Αθήνα. + // + // If the data provider does not have a value for Greek, the result will be + // in a language that the provider does support. Language *string `min:"2" type:"string"` // An optional parameter. The maximum number of results returned per request. @@ -14462,12 +14493,19 @@ type SearchPlaceIndexForSuggestionsInput struct { // BCP 47 (https://tools.ietf.org/search/bcp47) language tag, for example, en // for English. // - // This setting affects the languages used in the results. 
It does not change - // which results are returned. If the language is not specified, or not supported - // for a particular result, the partner automatically chooses a language for - // the result. + // This setting affects the languages used in the results. If no language is + // specified, or not supported for a particular result, the partner automatically + // chooses a language for the result. + // + // For an example, we'll use the Greek language. You search for Athens, Gr to + // get suggestions with the language parameter set to en. The results found + // will most likely be returned as Athens, Greece. // - // Used only when the partner selected is Here. + // If you set the language parameter to el, for Greek, then the result found + // will more likely be returned as Αθήνα, Ελλάδα. + // + // If the data provider does not have a value for Greek, the result will be + // in a language that the provider does support. Language *string `min:"2" type:"string"` // An optional parameter. The maximum number of results returned per request. @@ -14808,10 +14846,19 @@ type SearchPlaceIndexForTextInput struct { // BCP 47 (https://tools.ietf.org/search/bcp47) language tag, for example, en // for English. // - // This setting affects the languages used in the results. It does not change - // which results are returned. If the language is not specified, or not supported - // for a particular result, the partner automatically chooses a language for - // the result. + // This setting affects the languages used in the results, but not the results + // themselves. If no language is specified, or not supported for a particular + // result, the partner automatically chooses a language for the result. + // + // For an example, we'll use the Greek language. You search for Athens, Greece, + // with the language parameter set to en. The result found will most likely + // be returned as Athens. 
+ // + // If you set the language parameter to el, for Greek, then the result found + // will more likely be returned as Αθήνα. + // + // If the data provider does not have a value for Greek, the result will be + // in a language that the provider does support. Language *string `min:"2" type:"string"` // An optional parameter. The maximum number of results returned per request. @@ -14933,6 +14980,9 @@ type SearchPlaceIndexForTextOutput struct { // A list of Places matching the input text. Each result contains additional // information about the specific point of interest. // + // Not all response properties are included with all responses. Some properties + // may only be returned by specific data partners. + // // Results is a required field Results []*SearchForTextResult `type:"list" required:"true"` diff --git a/service/mediapackage/api.go b/service/mediapackage/api.go index 4144b18a034..b9eecd3b0f1 100644 --- a/service/mediapackage/api.go +++ b/service/mediapackage/api.go @@ -3149,7 +3149,8 @@ type DashPackage struct { PeriodTriggers []*string `locationName:"periodTriggers" type:"list" enum:"__PeriodTriggersElement"` // The Dynamic Adaptive Streaming over HTTP (DASH) profile type. When set to - // "HBBTV_1_5", HbbTV 1.5 compliant output is enabled. + // "HBBTV_1_5", HbbTV 1.5 compliant output is enabled. When set to "DVB-DASH_2014", + // DVB-DASH 2014 compliant output is enabled. Profile *string `locationName:"profile" type:"string" enum:"Profile"` // Duration (in seconds) of each segment. 
Actual segments will berounded to @@ -3175,7 +3176,7 @@ type DashPackage struct { UtcTiming *string `locationName:"utcTiming" type:"string" enum:"UtcTiming"` // Specifies the value attribute of the UTCTiming field when utcTiming is set - // to HTTP-ISO or HTTP-HEAD + // to HTTP-ISO, HTTP-HEAD or HTTP-XSDATE UtcTimingUri *string `locationName:"utcTimingUri" type:"string"` } @@ -7125,6 +7126,9 @@ const ( // ProfileHybridcast is a Profile enum value ProfileHybridcast = "HYBRIDCAST" + + // ProfileDvbDash2014 is a Profile enum value + ProfileDvbDash2014 = "DVB_DASH_2014" ) // Profile_Values returns all elements of the Profile enum @@ -7133,6 +7137,7 @@ func Profile_Values() []string { ProfileNone, ProfileHbbtv15, ProfileHybridcast, + ProfileDvbDash2014, } } @@ -7205,6 +7210,9 @@ const ( // UtcTimingHttpIso is a UtcTiming enum value UtcTimingHttpIso = "HTTP-ISO" + + // UtcTimingHttpXsdate is a UtcTiming enum value + UtcTimingHttpXsdate = "HTTP-XSDATE" ) // UtcTiming_Values returns all elements of the UtcTiming enum @@ -7213,6 +7221,7 @@ func UtcTiming_Values() []string { UtcTimingNone, UtcTimingHttpHead, UtcTimingHttpIso, + UtcTimingHttpXsdate, } } diff --git a/service/rds/api.go b/service/rds/api.go index 935bed9e306..fe32dcc7cea 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -19715,13 +19715,6 @@ type CreateDBInstanceInput struct { // Constraint: The AvailabilityZone parameter can't be specified if the DB instance // is a Multi-AZ deployment. The specified Availability Zone must be in the // same Amazon Web Services Region as the current endpoint. - // - // If you're creating a DB instance in an RDS on VMware environment, specify - // the identifier of the custom Availability Zone to create the DB instance - // in. - // - // For more information about RDS on VMware, see the RDS on VMware User Guide. 
- // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) AvailabilityZone *string `type:"string"` // The number of days for which automated backups are retained. Setting this diff --git a/service/redshift/api.go b/service/redshift/api.go index cc683093e12..ed8dc6ad147 100644 --- a/service/redshift/api.go +++ b/service/redshift/api.go @@ -440,7 +440,7 @@ func (c *Redshift) AuthorizeDataShareRequest(input *AuthorizeDataShareInput) (re // // From a data producer account, authorizes the sharing of a datashare with // one or more consumer accounts or managing entities. To authorize a datashare -// for a data consumer, the producer account must have the correct access privileges. +// for a data consumer, the producer account must have the correct access permissions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2678,7 +2678,8 @@ func (c *Redshift) DeauthorizeDataShareRequest(input *DeauthorizeDataShareInput) // DeauthorizeDataShare API operation for Amazon Redshift. // -// From the producer account, removes authorization from the specified datashare. +// From a datashare producer account, removes authorization from the specified +// datashare. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9266,7 +9267,7 @@ func (c *Redshift) DisassociateDataShareConsumerRequest(input *DisassociateDataS // DisassociateDataShareConsumer API operation for Amazon Redshift. // -// From a consumer account, remove association for the specified datashare. +// From a datashare consumer account, remove association for the specified datashare. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9582,7 +9583,7 @@ func (c *Redshift) GetClusterCredentialsRequest(input *GetClusterCredentialsInpu // action with access to the listed dbgroups. // // In addition, if the AutoCreate parameter is set to True, then the policy -// must include the redshift:CreateClusterUser privilege. +// must include the redshift:CreateClusterUser permission. // // If the DbName parameter is specified, the IAM policy must allow access to // the resource dbname for the specified database name. @@ -11768,7 +11769,7 @@ func (c *Redshift) RejectDataShareRequest(input *RejectDataShareInput) (req *req // RejectDataShare API operation for Amazon Redshift. // -// From the consumer account, rejects the specified datashare. +// From a datashare consumer account, rejects the specified datashare. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -13329,7 +13330,7 @@ type AssociateDataShareConsumerOutput struct { // format. DataShareArn *string `type:"string"` - // A value that specifies when the datashare has an association between a producer + // A value that specifies when the datashare has an association between producer // and data consumers. DataShareAssociations []*DataShareAssociation `type:"list"` @@ -13646,7 +13647,7 @@ type AuthorizeDataShareOutput struct { // format. DataShareArn *string `type:"string"` - // A value that specifies when the datashare has an association between a producer + // A value that specifies when the datashare has an association between producer // and data consumers. DataShareAssociations []*DataShareAssociation `type:"list"` @@ -16233,6 +16234,9 @@ type CreateClusterInput struct { // to use to encrypt data in the cluster. 
KmsKeyId *string `type:"string"` + // A flag that specifies whether to load sample data once the cluster is created. + LoadSampleData *string `type:"string"` + // An optional parameter for the name of the maintenance track for the cluster. // If you don't provide a maintenance track name, the cluster is assigned to // the current track. @@ -16514,6 +16518,12 @@ func (s *CreateClusterInput) SetKmsKeyId(v string) *CreateClusterInput { return s } +// SetLoadSampleData sets the LoadSampleData field's value. +func (s *CreateClusterInput) SetLoadSampleData(v string) *CreateClusterInput { + s.LoadSampleData = &v + return s +} + // SetMaintenanceTrackName sets the MaintenanceTrackName field's value. func (s *CreateClusterInput) SetMaintenanceTrackName(v string) *CreateClusterInput { s.MaintenanceTrackName = &v @@ -18620,7 +18630,7 @@ type DataShare struct { // format. DataShareArn *string `type:"string"` - // A value that specifies when the datashare has an association between a producer + // A value that specifies when the datashare has an association between producer // and data consumers. DataShareAssociations []*DataShareAssociation `type:"list"` @@ -18903,7 +18913,7 @@ type DeauthorizeDataShareOutput struct { // format. DataShareArn *string `type:"string"` - // A value that specifies when the datashare has an association between a producer + // A value that specifies when the datashare has an association between producer // and data consumers. DataShareAssociations []*DataShareAssociation `type:"list"` @@ -25365,7 +25375,7 @@ type DisassociateDataShareConsumerOutput struct { // format. DataShareArn *string `type:"string"` - // A value that specifies when the datashare has an association between a producer + // A value that specifies when the datashare has an association between producer // and data consumers. 
DataShareAssociations []*DataShareAssociation `type:"list"` @@ -26541,7 +26551,7 @@ type GetClusterCredentialsOutput struct { // A database user name that is authorized to log on to the database DbName // using the password DbPassword. If the specified DbUser exists in the database, - // the new user name has the same database privileges as the the user named + // the new user name has the same database permissions as the the user named // in DbUser. By default, the user is added to PUBLIC. If the DbGroups parameter // is specifed, DbUser is added to the listed groups for any sessions created // using these credentials. @@ -30498,7 +30508,7 @@ type RejectDataShareOutput struct { // format. DataShareArn *string `type:"string"` - // A value that specifies when the datashare has an association between a producer + // A value that specifies when the datashare has an association between producer // and data consumers. DataShareAssociations []*DataShareAssociation `type:"list"` diff --git a/service/securityhub/api.go b/service/securityhub/api.go index 3a30b47d033..f4afd034d7f 100644 --- a/service/securityhub/api.go +++ b/service/securityhub/api.go @@ -482,12 +482,17 @@ func (c *SecurityHub) BatchImportFindingsRequest(input *BatchImportFindingsInput // // BatchImportFindings must be called by one of the following: // -// * The account that is associated with the findings. The identifier of -// the associated account is the value of the AwsAccountId attribute for -// the finding. -// -// * An account that is allow-listed for an official Security Hub partner -// integration. +// * The Amazon Web Services account that is associated with a finding if +// you are using the default product ARN (https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-custom-providers.html#securityhub-custom-providers-bfi-reqs) +// or are a partner sending findings from within a customer's Amazon Web +// Services account. 
In these cases, the identifier of the account that you +// are calling BatchImportFindings from needs to be the same as the AwsAccountId +// attribute for the finding. +// +// * An Amazon Web Services account that Security Hub has allow-listed for +// an official partner integration. In this case, you can call BatchImportFindings +// from the allow-listed account and send findings from different customer +// accounts in the same batch. // // The maximum allowed size for a finding is 240 Kb. An error is returned for // any finding larger than 240 Kb. diff --git a/service/securityhub/doc.go b/service/securityhub/doc.go index dc169e2a7a4..839b778ff7f 100644 --- a/service/securityhub/doc.go +++ b/service/securityhub/doc.go @@ -33,8 +33,11 @@ // * GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests // per second. // -// * UpdateFindings - RateLimit of 1 request per second. BurstLimit of 5 -// requests per second. +// * BatchImportFindings - RateLimit of 10 requests per second. BurstLimit +// of 30 requests per second. +// +// * BatchUpdateFindings - RateLimit of 10 requests per second. BurstLimit +// of 30 requests per second. // // * UpdateStandardsControl - RateLimit of 1 request per second, BurstLimit // of 5 requests per second.