diff --git a/clients/client-appstream/src/AppStream.ts b/clients/client-appstream/src/AppStream.ts index bb3e6f65e64c..0aac16c7bc28 100644 --- a/clients/client-appstream/src/AppStream.ts +++ b/clients/client-appstream/src/AppStream.ts @@ -6,6 +6,11 @@ import { AssociateApplicationFleetCommandInput, AssociateApplicationFleetCommandOutput, } from "./commands/AssociateApplicationFleetCommand"; +import { + AssociateApplicationToEntitlementCommand, + AssociateApplicationToEntitlementCommandInput, + AssociateApplicationToEntitlementCommandOutput, +} from "./commands/AssociateApplicationToEntitlementCommand"; import { AssociateFleetCommand, AssociateFleetCommandInput, @@ -37,6 +42,11 @@ import { CreateDirectoryConfigCommandInput, CreateDirectoryConfigCommandOutput, } from "./commands/CreateDirectoryConfigCommand"; +import { + CreateEntitlementCommand, + CreateEntitlementCommandInput, + CreateEntitlementCommandOutput, +} from "./commands/CreateEntitlementCommand"; import { CreateFleetCommand, CreateFleetCommandInput, CreateFleetCommandOutput } from "./commands/CreateFleetCommand"; import { CreateImageBuilderCommand, @@ -80,6 +90,11 @@ import { DeleteDirectoryConfigCommandInput, DeleteDirectoryConfigCommandOutput, } from "./commands/DeleteDirectoryConfigCommand"; +import { + DeleteEntitlementCommand, + DeleteEntitlementCommandInput, + DeleteEntitlementCommandOutput, +} from "./commands/DeleteEntitlementCommand"; import { DeleteFleetCommand, DeleteFleetCommandInput, DeleteFleetCommandOutput } from "./commands/DeleteFleetCommand"; import { DeleteImageBuilderCommand, @@ -119,6 +134,11 @@ import { DescribeDirectoryConfigsCommandInput, DescribeDirectoryConfigsCommandOutput, } from "./commands/DescribeDirectoryConfigsCommand"; +import { + DescribeEntitlementsCommand, + DescribeEntitlementsCommandInput, + DescribeEntitlementsCommandOutput, +} from "./commands/DescribeEntitlementsCommand"; import { DescribeFleetsCommand, DescribeFleetsCommandInput, @@ -170,6 +190,11 @@ import { DisassociateApplicationFleetCommandInput, DisassociateApplicationFleetCommandOutput, } from "./commands/DisassociateApplicationFleetCommand"; +import { + DisassociateApplicationFromEntitlementCommand, + DisassociateApplicationFromEntitlementCommandInput, + DisassociateApplicationFromEntitlementCommandOutput, +} from "./commands/DisassociateApplicationFromEntitlementCommand"; import { DisassociateFleetCommand, DisassociateFleetCommandInput, @@ -191,6 +216,11 @@ import { ListAssociatedStacksCommandInput, ListAssociatedStacksCommandOutput, } from "./commands/ListAssociatedStacksCommand"; +import { + ListEntitledApplicationsCommand, + ListEntitledApplicationsCommandInput, + ListEntitledApplicationsCommandOutput, +} from "./commands/ListEntitledApplicationsCommand"; import { ListTagsForResourceCommand, ListTagsForResourceCommandInput, @@ -224,6 +254,11 @@ import { UpdateDirectoryConfigCommandInput, UpdateDirectoryConfigCommandOutput, } from "./commands/UpdateDirectoryConfigCommand"; +import { + UpdateEntitlementCommand, + UpdateEntitlementCommandInput, + UpdateEntitlementCommandOutput, +} from "./commands/UpdateEntitlementCommand"; import { UpdateFleetCommand, UpdateFleetCommandInput, UpdateFleetCommandOutput } from "./commands/UpdateFleetCommand"; import { UpdateImagePermissionsCommand, @@ -288,6 +323,38 @@ export class AppStream extends AppStreamClient { } } + /** + *

Associates the specified application with the specified entitlement.
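For reference, a minimal usage sketch of this operation on the aggregated client might look like the following; the stack, entitlement, and application identifiers are placeholders, and the request fields correspond to AssociateApplicationToEntitlementRequest further down in this diff.

```ts
import { AppStream } from "@aws-sdk/client-appstream";

// Illustrative region; substitute your own client configuration.
const appstream = new AppStream({ region: "us-east-1" });

async function entitleApplication(): Promise<void> {
  // StackName, EntitlementName, and ApplicationIdentifier are the three
  // required members of AssociateApplicationToEntitlementRequest.
  await appstream.associateApplicationToEntitlement({
    StackName: "example-stack",             // placeholder stack name
    EntitlementName: "example-entitlement", // placeholder entitlement name
    ApplicationIdentifier: "example-app",   // placeholder application identifier
  });
}
```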

+ */ + public associateApplicationToEntitlement( + args: AssociateApplicationToEntitlementCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public associateApplicationToEntitlement( + args: AssociateApplicationToEntitlementCommandInput, + cb: (err: any, data?: AssociateApplicationToEntitlementCommandOutput) => void + ): void; + public associateApplicationToEntitlement( + args: AssociateApplicationToEntitlementCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AssociateApplicationToEntitlementCommandOutput) => void + ): void; + public associateApplicationToEntitlement( + args: AssociateApplicationToEntitlementCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: AssociateApplicationToEntitlementCommandOutput) => void), + cb?: (err: any, data?: AssociateApplicationToEntitlementCommandOutput) => void + ): Promise | void { + const command = new AssociateApplicationToEntitlementCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Associates the specified fleet with the specified stack.

*/ @@ -518,6 +585,43 @@ export class AppStream extends AppStreamClient { } } + /** + *

Creates a new entitlement. Entitlements control access to specific applications within + * a stack, based on user attributes. Entitlements apply to SAML 2.0 federated user + * identities. Amazon AppStream 2.0 user pool and streaming URL users are entitled to all + * applications in a stack. Entitlements don't apply to the desktop stream view + * application, or to applications managed by a dynamic app provider using the Dynamic + * Application Framework.
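As a rough sketch of how app visibility and entitlement attributes come together in a create call (entitlement, stack, and attribute values are placeholders; the request fields match CreateEntitlementRequest further down in this diff):

```ts
import { AppStreamClient, CreateEntitlementCommand } from "@aws-sdk/client-appstream";

const client = new AppStreamClient({ region: "us-east-1" }); // illustrative region

async function createSalesEntitlement() {
  // Entitle only the applications later associated with this entitlement
  // ("ASSOCIATED") to SAML users whose PrincipalTag:department value is "sales".
  const { Entitlement } = await client.send(
    new CreateEntitlementCommand({
      Name: "sales-apps",                 // placeholder entitlement name
      StackName: "example-stack",         // placeholder stack name
      Description: "Applications for the sales department",
      AppVisibility: "ASSOCIATED",        // or "ALL" for every app in the stack
      Attributes: [{ Name: "department", Value: "sales" }],
    })
  );
  return Entitlement;
}
```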

+ */ + public createEntitlement( + args: CreateEntitlementCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createEntitlement( + args: CreateEntitlementCommandInput, + cb: (err: any, data?: CreateEntitlementCommandOutput) => void + ): void; + public createEntitlement( + args: CreateEntitlementCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateEntitlementCommandOutput) => void + ): void; + public createEntitlement( + args: CreateEntitlementCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateEntitlementCommandOutput) => void), + cb?: (err: any, data?: CreateEntitlementCommandOutput) => void + ): Promise | void { + const command = new CreateEntitlementCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Creates a fleet. A fleet consists of streaming instances that run a specified image when using Always-On or On-Demand.

*/ @@ -856,6 +960,38 @@ export class AppStream extends AppStreamClient { } } + /** + *

Deletes the specified entitlement.

+ */ + public deleteEntitlement( + args: DeleteEntitlementCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteEntitlement( + args: DeleteEntitlementCommandInput, + cb: (err: any, data?: DeleteEntitlementCommandOutput) => void + ): void; + public deleteEntitlement( + args: DeleteEntitlementCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteEntitlementCommandOutput) => void + ): void; + public deleteEntitlement( + args: DeleteEntitlementCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteEntitlementCommandOutput) => void), + cb?: (err: any, data?: DeleteEntitlementCommandOutput) => void + ): Promise | void { + const command = new DeleteEntitlementCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Deletes the specified fleet.

*/ @@ -1187,6 +1323,38 @@ export class AppStream extends AppStreamClient { } } + /** + *

Retrieves a list that describes one or more entitlements.
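Because results are paginated through NextToken (see DescribeEntitlementsRequest and DescribeEntitlementsResult later in this diff), a caller would typically loop until the token is exhausted. A sketch with placeholder names:

```ts
import { AppStreamClient, DescribeEntitlementsCommand } from "@aws-sdk/client-appstream";

const client = new AppStreamClient({ region: "us-east-1" }); // illustrative region

async function printEntitlements(stackName: string): Promise<void> {
  let nextToken: string | undefined;
  do {
    // Each page returns up to MaxResults entitlements plus an optional NextToken.
    const page = await client.send(
      new DescribeEntitlementsCommand({
        StackName: stackName,
        MaxResults: 25,
        NextToken: nextToken,
      })
    );
    for (const entitlement of page.Entitlements ?? []) {
      console.log(`${entitlement.Name}: AppVisibility=${entitlement.AppVisibility}`);
    }
    nextToken = page.NextToken;
  } while (nextToken);
}
```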

+ */ + public describeEntitlements( + args: DescribeEntitlementsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeEntitlements( + args: DescribeEntitlementsCommandInput, + cb: (err: any, data?: DescribeEntitlementsCommandOutput) => void + ): void; + public describeEntitlements( + args: DescribeEntitlementsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeEntitlementsCommandOutput) => void + ): void; + public describeEntitlements( + args: DescribeEntitlementsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeEntitlementsCommandOutput) => void), + cb?: (err: any, data?: DescribeEntitlementsCommandOutput) => void + ): Promise | void { + const command = new DescribeEntitlementsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Retrieves a list that describes one or more specified fleets, if the fleet names are provided. Otherwise, all fleets in the account are described.

*/ @@ -1543,6 +1711,40 @@ export class AppStream extends AppStreamClient { } } + /** + *

Disassociates the specified application from the specified entitlement.

+ */ + public disassociateApplicationFromEntitlement( + args: DisassociateApplicationFromEntitlementCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disassociateApplicationFromEntitlement( + args: DisassociateApplicationFromEntitlementCommandInput, + cb: (err: any, data?: DisassociateApplicationFromEntitlementCommandOutput) => void + ): void; + public disassociateApplicationFromEntitlement( + args: DisassociateApplicationFromEntitlementCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisassociateApplicationFromEntitlementCommandOutput) => void + ): void; + public disassociateApplicationFromEntitlement( + args: DisassociateApplicationFromEntitlementCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: DisassociateApplicationFromEntitlementCommandOutput) => void), + cb?: (err: any, data?: DisassociateApplicationFromEntitlementCommandOutput) => void + ): Promise | void { + const command = new DisassociateApplicationFromEntitlementCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Disassociates the specified fleet from the specified stack.

*/ @@ -1697,6 +1899,38 @@ export class AppStream extends AppStreamClient { } } + /** + *

Retrieves a list of entitled applications.
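The aggregated client also accepts Node-style callbacks, as the overloads below show. A sketch using the callback form; the request members (StackName and EntitlementName) are an assumption here, since ListEntitledApplicationsRequest is not shown in this excerpt:

```ts
import { AppStream } from "@aws-sdk/client-appstream";

const appstream = new AppStream({ region: "us-east-1" }); // illustrative region

// Callback overload: (args, cb), with no HTTP handler options object.
appstream.listEntitledApplications(
  {
    StackName: "example-stack",    // assumed request member
    EntitlementName: "sales-apps", // assumed request member
  },
  (err, data) => {
    if (err) {
      console.error("ListEntitledApplications failed:", err);
    } else {
      console.log("Entitled applications:", data);
    }
  }
);
```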

+ */ + public listEntitledApplications( + args: ListEntitledApplicationsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listEntitledApplications( + args: ListEntitledApplicationsCommandInput, + cb: (err: any, data?: ListEntitledApplicationsCommandOutput) => void + ): void; + public listEntitledApplications( + args: ListEntitledApplicationsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListEntitledApplicationsCommandOutput) => void + ): void; + public listEntitledApplications( + args: ListEntitledApplicationsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListEntitledApplicationsCommandOutput) => void), + cb?: (err: any, data?: ListEntitledApplicationsCommandOutput) => void + ): Promise | void { + const command = new ListEntitledApplicationsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Retrieves a list of all tags for the specified AppStream 2.0 resource. You can tag AppStream 2.0 image builders, images, fleets, and stacks.

*

For more information about tags, see Tagging Your Resources in the Amazon AppStream 2.0 Administration Guide.

@@ -1976,6 +2210,38 @@ export class AppStream extends AppStreamClient { } } + /** + *

Updates the specified entitlement.
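A sketch of an update call; UpdateEntitlementRequest is not shown in this excerpt, so the assumption here is that it mirrors CreateEntitlementRequest, with Name and StackName identifying the entitlement and the remaining fields optional:

```ts
import { AppStreamClient, UpdateEntitlementCommand } from "@aws-sdk/client-appstream";

const client = new AppStreamClient({ region: "us-east-1" }); // illustrative region

async function widenEntitlement(): Promise<void> {
  await client.send(
    new UpdateEntitlementCommand({
      Name: "sales-apps",         // placeholder entitlement name
      StackName: "example-stack", // placeholder stack name
      AppVisibility: "ALL",       // assumed optional field: entitle every app in the stack
    })
  );
}
```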

+ */ + public updateEntitlement( + args: UpdateEntitlementCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateEntitlement( + args: UpdateEntitlementCommandInput, + cb: (err: any, data?: UpdateEntitlementCommandOutput) => void + ): void; + public updateEntitlement( + args: UpdateEntitlementCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateEntitlementCommandOutput) => void + ): void; + public updateEntitlement( + args: UpdateEntitlementCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateEntitlementCommandOutput) => void), + cb?: (err: any, data?: UpdateEntitlementCommandOutput) => void + ): Promise | void { + const command = new UpdateEntitlementCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Updates the specified fleet.

*

If the fleet is in the STOPPED state, you can update any attribute except diff --git a/clients/client-appstream/src/AppStreamClient.ts b/clients/client-appstream/src/AppStreamClient.ts index 4ba943d8ad65..5ea2a979785b 100644 --- a/clients/client-appstream/src/AppStreamClient.ts +++ b/clients/client-appstream/src/AppStreamClient.ts @@ -53,6 +53,10 @@ import { AssociateApplicationFleetCommandInput, AssociateApplicationFleetCommandOutput, } from "./commands/AssociateApplicationFleetCommand"; +import { + AssociateApplicationToEntitlementCommandInput, + AssociateApplicationToEntitlementCommandOutput, +} from "./commands/AssociateApplicationToEntitlementCommand"; import { AssociateFleetCommandInput, AssociateFleetCommandOutput } from "./commands/AssociateFleetCommand"; import { BatchAssociateUserStackCommandInput, @@ -69,6 +73,7 @@ import { CreateDirectoryConfigCommandInput, CreateDirectoryConfigCommandOutput, } from "./commands/CreateDirectoryConfigCommand"; +import { CreateEntitlementCommandInput, CreateEntitlementCommandOutput } from "./commands/CreateEntitlementCommand"; import { CreateFleetCommandInput, CreateFleetCommandOutput } from "./commands/CreateFleetCommand"; import { CreateImageBuilderCommandInput, CreateImageBuilderCommandOutput } from "./commands/CreateImageBuilderCommand"; import { @@ -89,6 +94,7 @@ import { DeleteDirectoryConfigCommandInput, DeleteDirectoryConfigCommandOutput, } from "./commands/DeleteDirectoryConfigCommand"; +import { DeleteEntitlementCommandInput, DeleteEntitlementCommandOutput } from "./commands/DeleteEntitlementCommand"; import { DeleteFleetCommandInput, DeleteFleetCommandOutput } from "./commands/DeleteFleetCommand"; import { DeleteImageBuilderCommandInput, DeleteImageBuilderCommandOutput } from "./commands/DeleteImageBuilderCommand"; import { DeleteImageCommandInput, DeleteImageCommandOutput } from "./commands/DeleteImageCommand"; @@ -115,6 +121,10 @@ import { DescribeDirectoryConfigsCommandInput, DescribeDirectoryConfigsCommandOutput, } from "./commands/DescribeDirectoryConfigsCommand"; +import { + DescribeEntitlementsCommandInput, + DescribeEntitlementsCommandOutput, +} from "./commands/DescribeEntitlementsCommand"; import { DescribeFleetsCommandInput, DescribeFleetsCommandOutput } from "./commands/DescribeFleetsCommand"; import { DescribeImageBuildersCommandInput, @@ -141,6 +151,10 @@ import { DisassociateApplicationFleetCommandInput, DisassociateApplicationFleetCommandOutput, } from "./commands/DisassociateApplicationFleetCommand"; +import { + DisassociateApplicationFromEntitlementCommandInput, + DisassociateApplicationFromEntitlementCommandOutput, +} from "./commands/DisassociateApplicationFromEntitlementCommand"; import { DisassociateFleetCommandInput, DisassociateFleetCommandOutput } from "./commands/DisassociateFleetCommand"; import { EnableUserCommandInput, EnableUserCommandOutput } from "./commands/EnableUserCommand"; import { ExpireSessionCommandInput, ExpireSessionCommandOutput } from "./commands/ExpireSessionCommand"; @@ -152,6 +166,10 @@ import { ListAssociatedStacksCommandInput, ListAssociatedStacksCommandOutput, } from "./commands/ListAssociatedStacksCommand"; +import { + ListEntitledApplicationsCommandInput, + ListEntitledApplicationsCommandOutput, +} from "./commands/ListEntitledApplicationsCommand"; import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, @@ -167,6 +185,7 @@ import { UpdateDirectoryConfigCommandInput, UpdateDirectoryConfigCommandOutput, } from "./commands/UpdateDirectoryConfigCommand"; +import { 
UpdateEntitlementCommandInput, UpdateEntitlementCommandOutput } from "./commands/UpdateEntitlementCommand"; import { UpdateFleetCommandInput, UpdateFleetCommandOutput } from "./commands/UpdateFleetCommand"; import { UpdateImagePermissionsCommandInput, @@ -177,6 +196,7 @@ import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = | AssociateApplicationFleetCommandInput + | AssociateApplicationToEntitlementCommandInput | AssociateFleetCommandInput | BatchAssociateUserStackCommandInput | BatchDisassociateUserStackCommandInput @@ -184,6 +204,7 @@ export type ServiceInputTypes = | CreateAppBlockCommandInput | CreateApplicationCommandInput | CreateDirectoryConfigCommandInput + | CreateEntitlementCommandInput | CreateFleetCommandInput | CreateImageBuilderCommandInput | CreateImageBuilderStreamingURLCommandInput @@ -195,6 +216,7 @@ export type ServiceInputTypes = | DeleteAppBlockCommandInput | DeleteApplicationCommandInput | DeleteDirectoryConfigCommandInput + | DeleteEntitlementCommandInput | DeleteFleetCommandInput | DeleteImageBuilderCommandInput | DeleteImageCommandInput @@ -206,6 +228,7 @@ export type ServiceInputTypes = | DescribeApplicationFleetAssociationsCommandInput | DescribeApplicationsCommandInput | DescribeDirectoryConfigsCommandInput + | DescribeEntitlementsCommandInput | DescribeFleetsCommandInput | DescribeImageBuildersCommandInput | DescribeImagePermissionsCommandInput @@ -217,11 +240,13 @@ export type ServiceInputTypes = | DescribeUsersCommandInput | DisableUserCommandInput | DisassociateApplicationFleetCommandInput + | DisassociateApplicationFromEntitlementCommandInput | DisassociateFleetCommandInput | EnableUserCommandInput | ExpireSessionCommandInput | ListAssociatedFleetsCommandInput | ListAssociatedStacksCommandInput + | ListEntitledApplicationsCommandInput | ListTagsForResourceCommandInput | StartFleetCommandInput | StartImageBuilderCommandInput @@ -231,12 +256,14 @@ export type ServiceInputTypes = | UntagResourceCommandInput | UpdateApplicationCommandInput | UpdateDirectoryConfigCommandInput + | UpdateEntitlementCommandInput | UpdateFleetCommandInput | UpdateImagePermissionsCommandInput | UpdateStackCommandInput; export type ServiceOutputTypes = | AssociateApplicationFleetCommandOutput + | AssociateApplicationToEntitlementCommandOutput | AssociateFleetCommandOutput | BatchAssociateUserStackCommandOutput | BatchDisassociateUserStackCommandOutput @@ -244,6 +271,7 @@ export type ServiceOutputTypes = | CreateAppBlockCommandOutput | CreateApplicationCommandOutput | CreateDirectoryConfigCommandOutput + | CreateEntitlementCommandOutput | CreateFleetCommandOutput | CreateImageBuilderCommandOutput | CreateImageBuilderStreamingURLCommandOutput @@ -255,6 +283,7 @@ export type ServiceOutputTypes = | DeleteAppBlockCommandOutput | DeleteApplicationCommandOutput | DeleteDirectoryConfigCommandOutput + | DeleteEntitlementCommandOutput | DeleteFleetCommandOutput | DeleteImageBuilderCommandOutput | DeleteImageCommandOutput @@ -266,6 +295,7 @@ export type ServiceOutputTypes = | DescribeApplicationFleetAssociationsCommandOutput | DescribeApplicationsCommandOutput | DescribeDirectoryConfigsCommandOutput + | DescribeEntitlementsCommandOutput | DescribeFleetsCommandOutput | DescribeImageBuildersCommandOutput | DescribeImagePermissionsCommandOutput @@ -277,11 +307,13 @@ export type ServiceOutputTypes = | DescribeUsersCommandOutput | DisableUserCommandOutput | DisassociateApplicationFleetCommandOutput + | 
DisassociateApplicationFromEntitlementCommandOutput | DisassociateFleetCommandOutput | EnableUserCommandOutput | ExpireSessionCommandOutput | ListAssociatedFleetsCommandOutput | ListAssociatedStacksCommandOutput + | ListEntitledApplicationsCommandOutput | ListTagsForResourceCommandOutput | StartFleetCommandOutput | StartImageBuilderCommandOutput @@ -291,6 +323,7 @@ export type ServiceOutputTypes = | UntagResourceCommandOutput | UpdateApplicationCommandOutput | UpdateDirectoryConfigCommandOutput + | UpdateEntitlementCommandOutput | UpdateFleetCommandOutput | UpdateImagePermissionsCommandOutput | UpdateStackCommandOutput; diff --git a/clients/client-appstream/src/commands/AssociateApplicationToEntitlementCommand.ts b/clients/client-appstream/src/commands/AssociateApplicationToEntitlementCommand.ts new file mode 100644 index 000000000000..424b3522b496 --- /dev/null +++ b/clients/client-appstream/src/commands/AssociateApplicationToEntitlementCommand.ts @@ -0,0 +1,103 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AppStreamClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AppStreamClient"; +import { AssociateApplicationToEntitlementRequest, AssociateApplicationToEntitlementResult } from "../models/models_0"; +import { + deserializeAws_json1_1AssociateApplicationToEntitlementCommand, + serializeAws_json1_1AssociateApplicationToEntitlementCommand, +} from "../protocols/Aws_json1_1"; + +export interface AssociateApplicationToEntitlementCommandInput extends AssociateApplicationToEntitlementRequest {} +export interface AssociateApplicationToEntitlementCommandOutput + extends AssociateApplicationToEntitlementResult, + __MetadataBearer {} + +/** + *

Associates the specified application with the specified entitlement.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AppStreamClient, AssociateApplicationToEntitlementCommand } from "@aws-sdk/client-appstream"; // ES Modules import + * // const { AppStreamClient, AssociateApplicationToEntitlementCommand } = require("@aws-sdk/client-appstream"); // CommonJS import + * const client = new AppStreamClient(config); + * const command = new AssociateApplicationToEntitlementCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AssociateApplicationToEntitlementCommandInput} for command's `input` shape. + * @see {@link AssociateApplicationToEntitlementCommandOutput} for command's `response` shape. + * @see {@link AppStreamClientResolvedConfig | config} for AppStreamClient's `config` shape. + * + */ +export class AssociateApplicationToEntitlementCommand extends $Command< + AssociateApplicationToEntitlementCommandInput, + AssociateApplicationToEntitlementCommandOutput, + AppStreamClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AssociateApplicationToEntitlementCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AppStreamClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AppStreamClient"; + const commandName = "AssociateApplicationToEntitlementCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AssociateApplicationToEntitlementRequest.filterSensitiveLog, + outputFilterSensitiveLog: AssociateApplicationToEntitlementResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: AssociateApplicationToEntitlementCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1AssociateApplicationToEntitlementCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1AssociateApplicationToEntitlementCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-appstream/src/commands/CreateEntitlementCommand.ts b/clients/client-appstream/src/commands/CreateEntitlementCommand.ts new file mode 100644 index 000000000000..24362e9f1267 --- /dev/null +++ b/clients/client-appstream/src/commands/CreateEntitlementCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { 
AppStreamClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AppStreamClient"; +import { CreateEntitlementRequest, CreateEntitlementResult } from "../models/models_0"; +import { + deserializeAws_json1_1CreateEntitlementCommand, + serializeAws_json1_1CreateEntitlementCommand, +} from "../protocols/Aws_json1_1"; + +export interface CreateEntitlementCommandInput extends CreateEntitlementRequest {} +export interface CreateEntitlementCommandOutput extends CreateEntitlementResult, __MetadataBearer {} + +/** + *

Creates a new entitlement. Entitlements control access to specific applications within + * a stack, based on user attributes. Entitlements apply to SAML 2.0 federated user + * identities. Amazon AppStream 2.0 user pool and streaming URL users are entitled to all + * applications in a stack. Entitlements don't apply to the desktop stream view + * application, or to applications managed by a dynamic app provider using the Dynamic + * Application Framework.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AppStreamClient, CreateEntitlementCommand } from "@aws-sdk/client-appstream"; // ES Modules import + * // const { AppStreamClient, CreateEntitlementCommand } = require("@aws-sdk/client-appstream"); // CommonJS import + * const client = new AppStreamClient(config); + * const command = new CreateEntitlementCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateEntitlementCommandInput} for command's `input` shape. + * @see {@link CreateEntitlementCommandOutput} for command's `response` shape. + * @see {@link AppStreamClientResolvedConfig | config} for AppStreamClient's `config` shape. + * + */ +export class CreateEntitlementCommand extends $Command< + CreateEntitlementCommandInput, + CreateEntitlementCommandOutput, + AppStreamClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateEntitlementCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AppStreamClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AppStreamClient"; + const commandName = "CreateEntitlementCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateEntitlementRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateEntitlementResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateEntitlementCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1CreateEntitlementCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1CreateEntitlementCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-appstream/src/commands/DeleteEntitlementCommand.ts b/clients/client-appstream/src/commands/DeleteEntitlementCommand.ts new file mode 100644 index 000000000000..e65f33bf4cb3 --- /dev/null +++ b/clients/client-appstream/src/commands/DeleteEntitlementCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AppStreamClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AppStreamClient"; +import { DeleteEntitlementRequest, DeleteEntitlementResult } from "../models/models_0"; +import { + deserializeAws_json1_1DeleteEntitlementCommand, + 
serializeAws_json1_1DeleteEntitlementCommand, +} from "../protocols/Aws_json1_1"; + +export interface DeleteEntitlementCommandInput extends DeleteEntitlementRequest {} +export interface DeleteEntitlementCommandOutput extends DeleteEntitlementResult, __MetadataBearer {} + +/** + *

Deletes the specified entitlement.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AppStreamClient, DeleteEntitlementCommand } from "@aws-sdk/client-appstream"; // ES Modules import + * // const { AppStreamClient, DeleteEntitlementCommand } = require("@aws-sdk/client-appstream"); // CommonJS import + * const client = new AppStreamClient(config); + * const command = new DeleteEntitlementCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteEntitlementCommandInput} for command's `input` shape. + * @see {@link DeleteEntitlementCommandOutput} for command's `response` shape. + * @see {@link AppStreamClientResolvedConfig | config} for AppStreamClient's `config` shape. + * + */ +export class DeleteEntitlementCommand extends $Command< + DeleteEntitlementCommandInput, + DeleteEntitlementCommandOutput, + AppStreamClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteEntitlementCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AppStreamClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AppStreamClient"; + const commandName = "DeleteEntitlementCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteEntitlementRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteEntitlementResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteEntitlementCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DeleteEntitlementCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1DeleteEntitlementCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-appstream/src/commands/DescribeEntitlementsCommand.ts b/clients/client-appstream/src/commands/DescribeEntitlementsCommand.ts new file mode 100644 index 000000000000..0717451649f5 --- /dev/null +++ b/clients/client-appstream/src/commands/DescribeEntitlementsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AppStreamClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AppStreamClient"; +import { DescribeEntitlementsRequest, DescribeEntitlementsResult } from "../models/models_0"; +import { + 
deserializeAws_json1_1DescribeEntitlementsCommand, + serializeAws_json1_1DescribeEntitlementsCommand, +} from "../protocols/Aws_json1_1"; + +export interface DescribeEntitlementsCommandInput extends DescribeEntitlementsRequest {} +export interface DescribeEntitlementsCommandOutput extends DescribeEntitlementsResult, __MetadataBearer {} + +/** + *

Retrieves a list that describes one or more entitlements.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AppStreamClient, DescribeEntitlementsCommand } from "@aws-sdk/client-appstream"; // ES Modules import + * // const { AppStreamClient, DescribeEntitlementsCommand } = require("@aws-sdk/client-appstream"); // CommonJS import + * const client = new AppStreamClient(config); + * const command = new DescribeEntitlementsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeEntitlementsCommandInput} for command's `input` shape. + * @see {@link DescribeEntitlementsCommandOutput} for command's `response` shape. + * @see {@link AppStreamClientResolvedConfig | config} for AppStreamClient's `config` shape. + * + */ +export class DescribeEntitlementsCommand extends $Command< + DescribeEntitlementsCommandInput, + DescribeEntitlementsCommandOutput, + AppStreamClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeEntitlementsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AppStreamClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AppStreamClient"; + const commandName = "DescribeEntitlementsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeEntitlementsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeEntitlementsResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeEntitlementsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DescribeEntitlementsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1DescribeEntitlementsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-appstream/src/commands/DisassociateApplicationFromEntitlementCommand.ts b/clients/client-appstream/src/commands/DisassociateApplicationFromEntitlementCommand.ts new file mode 100644 index 000000000000..02a76620147c --- /dev/null +++ b/clients/client-appstream/src/commands/DisassociateApplicationFromEntitlementCommand.ts @@ -0,0 +1,107 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AppStreamClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AppStreamClient"; +import { + 
DisassociateApplicationFromEntitlementRequest, + DisassociateApplicationFromEntitlementResult, +} from "../models/models_0"; +import { + deserializeAws_json1_1DisassociateApplicationFromEntitlementCommand, + serializeAws_json1_1DisassociateApplicationFromEntitlementCommand, +} from "../protocols/Aws_json1_1"; + +export interface DisassociateApplicationFromEntitlementCommandInput + extends DisassociateApplicationFromEntitlementRequest {} +export interface DisassociateApplicationFromEntitlementCommandOutput + extends DisassociateApplicationFromEntitlementResult, + __MetadataBearer {} + +/** + *

Disassociates the specified application from the specified entitlement.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AppStreamClient, DisassociateApplicationFromEntitlementCommand } from "@aws-sdk/client-appstream"; // ES Modules import + * // const { AppStreamClient, DisassociateApplicationFromEntitlementCommand } = require("@aws-sdk/client-appstream"); // CommonJS import + * const client = new AppStreamClient(config); + * const command = new DisassociateApplicationFromEntitlementCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisassociateApplicationFromEntitlementCommandInput} for command's `input` shape. + * @see {@link DisassociateApplicationFromEntitlementCommandOutput} for command's `response` shape. + * @see {@link AppStreamClientResolvedConfig | config} for AppStreamClient's `config` shape. + * + */ +export class DisassociateApplicationFromEntitlementCommand extends $Command< + DisassociateApplicationFromEntitlementCommandInput, + DisassociateApplicationFromEntitlementCommandOutput, + AppStreamClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisassociateApplicationFromEntitlementCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AppStreamClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AppStreamClient"; + const commandName = "DisassociateApplicationFromEntitlementCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisassociateApplicationFromEntitlementRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisassociateApplicationFromEntitlementResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DisassociateApplicationFromEntitlementCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1DisassociateApplicationFromEntitlementCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DisassociateApplicationFromEntitlementCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-appstream/src/commands/ListEntitledApplicationsCommand.ts b/clients/client-appstream/src/commands/ListEntitledApplicationsCommand.ts new file mode 100644 index 000000000000..1520ca66bacc --- /dev/null +++ b/clients/client-appstream/src/commands/ListEntitledApplicationsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + 
MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AppStreamClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AppStreamClient"; +import { ListEntitledApplicationsRequest, ListEntitledApplicationsResult } from "../models/models_0"; +import { + deserializeAws_json1_1ListEntitledApplicationsCommand, + serializeAws_json1_1ListEntitledApplicationsCommand, +} from "../protocols/Aws_json1_1"; + +export interface ListEntitledApplicationsCommandInput extends ListEntitledApplicationsRequest {} +export interface ListEntitledApplicationsCommandOutput extends ListEntitledApplicationsResult, __MetadataBearer {} + +/** + *

Retrieves a list of entitled applications.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AppStreamClient, ListEntitledApplicationsCommand } from "@aws-sdk/client-appstream"; // ES Modules import + * // const { AppStreamClient, ListEntitledApplicationsCommand } = require("@aws-sdk/client-appstream"); // CommonJS import + * const client = new AppStreamClient(config); + * const command = new ListEntitledApplicationsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListEntitledApplicationsCommandInput} for command's `input` shape. + * @see {@link ListEntitledApplicationsCommandOutput} for command's `response` shape. + * @see {@link AppStreamClientResolvedConfig | config} for AppStreamClient's `config` shape. + * + */ +export class ListEntitledApplicationsCommand extends $Command< + ListEntitledApplicationsCommandInput, + ListEntitledApplicationsCommandOutput, + AppStreamClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListEntitledApplicationsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AppStreamClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AppStreamClient"; + const commandName = "ListEntitledApplicationsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListEntitledApplicationsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListEntitledApplicationsResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListEntitledApplicationsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1ListEntitledApplicationsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1ListEntitledApplicationsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-appstream/src/commands/UpdateEntitlementCommand.ts b/clients/client-appstream/src/commands/UpdateEntitlementCommand.ts new file mode 100644 index 000000000000..bcf9b2d51907 --- /dev/null +++ b/clients/client-appstream/src/commands/UpdateEntitlementCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AppStreamClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AppStreamClient"; +import { UpdateEntitlementRequest, UpdateEntitlementResult 
} from "../models/models_0"; +import { + deserializeAws_json1_1UpdateEntitlementCommand, + serializeAws_json1_1UpdateEntitlementCommand, +} from "../protocols/Aws_json1_1"; + +export interface UpdateEntitlementCommandInput extends UpdateEntitlementRequest {} +export interface UpdateEntitlementCommandOutput extends UpdateEntitlementResult, __MetadataBearer {} + +/** + *

Updates the specified entitlement.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AppStreamClient, UpdateEntitlementCommand } from "@aws-sdk/client-appstream"; // ES Modules import + * // const { AppStreamClient, UpdateEntitlementCommand } = require("@aws-sdk/client-appstream"); // CommonJS import + * const client = new AppStreamClient(config); + * const command = new UpdateEntitlementCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateEntitlementCommandInput} for command's `input` shape. + * @see {@link UpdateEntitlementCommandOutput} for command's `response` shape. + * @see {@link AppStreamClientResolvedConfig | config} for AppStreamClient's `config` shape. + * + */ +export class UpdateEntitlementCommand extends $Command< + UpdateEntitlementCommandInput, + UpdateEntitlementCommandOutput, + AppStreamClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateEntitlementCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: AppStreamClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "AppStreamClient"; + const commandName = "UpdateEntitlementCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateEntitlementRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateEntitlementResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateEntitlementCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1UpdateEntitlementCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1UpdateEntitlementCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-appstream/src/commands/index.ts b/clients/client-appstream/src/commands/index.ts index a36730a130e5..8f4d08f800e9 100644 --- a/clients/client-appstream/src/commands/index.ts +++ b/clients/client-appstream/src/commands/index.ts @@ -1,4 +1,5 @@ export * from "./AssociateApplicationFleetCommand"; +export * from "./AssociateApplicationToEntitlementCommand"; export * from "./AssociateFleetCommand"; export * from "./BatchAssociateUserStackCommand"; export * from "./BatchDisassociateUserStackCommand"; @@ -6,6 +7,7 @@ export * from "./CopyImageCommand"; export * from "./CreateAppBlockCommand"; export * from "./CreateApplicationCommand"; export * from "./CreateDirectoryConfigCommand"; +export * from "./CreateEntitlementCommand"; export * from "./CreateFleetCommand"; export * from "./CreateImageBuilderCommand"; export * from "./CreateImageBuilderStreamingURLCommand"; @@ -17,6 +19,7 @@ export * from "./CreateUserCommand"; export * from "./DeleteAppBlockCommand"; export * from "./DeleteApplicationCommand"; export * 
from "./DeleteDirectoryConfigCommand"; +export * from "./DeleteEntitlementCommand"; export * from "./DeleteFleetCommand"; export * from "./DeleteImageBuilderCommand"; export * from "./DeleteImageCommand"; @@ -28,6 +31,7 @@ export * from "./DescribeAppBlocksCommand"; export * from "./DescribeApplicationFleetAssociationsCommand"; export * from "./DescribeApplicationsCommand"; export * from "./DescribeDirectoryConfigsCommand"; +export * from "./DescribeEntitlementsCommand"; export * from "./DescribeFleetsCommand"; export * from "./DescribeImageBuildersCommand"; export * from "./DescribeImagePermissionsCommand"; @@ -39,11 +43,13 @@ export * from "./DescribeUserStackAssociationsCommand"; export * from "./DescribeUsersCommand"; export * from "./DisableUserCommand"; export * from "./DisassociateApplicationFleetCommand"; +export * from "./DisassociateApplicationFromEntitlementCommand"; export * from "./DisassociateFleetCommand"; export * from "./EnableUserCommand"; export * from "./ExpireSessionCommand"; export * from "./ListAssociatedFleetsCommand"; export * from "./ListAssociatedStacksCommand"; +export * from "./ListEntitledApplicationsCommand"; export * from "./ListTagsForResourceCommand"; export * from "./StartFleetCommand"; export * from "./StartImageBuilderCommand"; @@ -53,6 +59,7 @@ export * from "./TagResourceCommand"; export * from "./UntagResourceCommand"; export * from "./UpdateApplicationCommand"; export * from "./UpdateDirectoryConfigCommand"; +export * from "./UpdateEntitlementCommand"; export * from "./UpdateFleetCommand"; export * from "./UpdateImagePermissionsCommand"; export * from "./UpdateStackCommand"; diff --git a/clients/client-appstream/src/models/models_0.ts b/clients/client-appstream/src/models/models_0.ts index 477490a039c4..b9ce41f7d9fe 100644 --- a/clients/client-appstream/src/models/models_0.ts +++ b/clients/client-appstream/src/models/models_0.ts @@ -326,6 +326,11 @@ export namespace ApplicationSettingsResponse { }); } +export enum AppVisibility { + ALL = "ALL", + ASSOCIATED = "ASSOCIATED", +} + export interface AssociateApplicationFleetRequest { /** *

The name of the fleet.

@@ -470,6 +475,64 @@ export namespace ResourceNotFoundException { }); } +export interface AssociateApplicationToEntitlementRequest { + /** + *

The name of the stack.

+ */ + StackName: string | undefined; + + /** + *

The name of the entitlement.

+ */ + EntitlementName: string | undefined; + + /** + *

The identifier of the application.

+ */ + ApplicationIdentifier: string | undefined; +} + +export namespace AssociateApplicationToEntitlementRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateApplicationToEntitlementRequest): any => ({ + ...obj, + }); +} + +export interface AssociateApplicationToEntitlementResult {} + +export namespace AssociateApplicationToEntitlementResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateApplicationToEntitlementResult): any => ({ + ...obj, + }); +} + +/** + *

The entitlement can't be found.

+ */ +export interface EntitlementNotFoundException extends __SmithyException, $MetadataBearer { + name: "EntitlementNotFoundException"; + $fault: "client"; + /** + *

The error message in the exception.

+ */ + Message?: string; +} + +export namespace EntitlementNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EntitlementNotFoundException): any => ({ + ...obj, + }); +} + export interface AssociateFleetRequest { /** *

The name of the fleet.

@@ -1114,6 +1177,187 @@ export namespace InvalidRoleException { }); } +/** + *

An attribute associated with an entitlement. Application entitlements work by matching + * a supported SAML 2.0 attribute name to a value when a user identity federates to an + * Amazon AppStream 2.0 SAML application.

+ */ +export interface EntitlementAttribute { + /** + *

A supported AWS IAM SAML PrincipalTag attribute that is matched to the + * associated value when a user identity federates into an Amazon AppStream 2.0 SAML + * application.

+ *

The following are valid values:

+ *   - roles
+ *   - department
+ *   - organization
+ *   - groups
+ *   - title
+ *   - costCenter
+ *   - userType

+ */ + Name: string | undefined; + + /** + *

A value that is matched to a supported SAML attribute name when a user identity + * federates into an Amazon AppStream 2.0 SAML application.

+ */ + Value: string | undefined; +} + +export namespace EntitlementAttribute { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EntitlementAttribute): any => ({ + ...obj, + }); +} + +export interface CreateEntitlementRequest { + /** + *

The name of the entitlement.

+ */ + Name: string | undefined; + + /** + *

The name of the stack with which the entitlement is associated.

+ */ + StackName: string | undefined; + + /** + *

The description of the entitlement.

+ */ + Description?: string; + + /** + *

Specifies whether all or selected apps are entitled.

+ */ + AppVisibility: AppVisibility | string | undefined; + + /** + *

The attributes of the entitlement.

+ */ + Attributes: EntitlementAttribute[] | undefined; +} + +export namespace CreateEntitlementRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEntitlementRequest): any => ({ + ...obj, + }); +} + +/** + *

Specifies an entitlement. Entitlements control access to specific applications within + * a stack, based on user attributes. Entitlements apply to SAML 2.0 federated user + * identities. Amazon AppStream 2.0 user pool and streaming URL users are entitled to all + * applications in a stack. Entitlements don't apply to the desktop stream view + * application, or to applications managed by a dynamic app provider using the Dynamic + * Application Framework.

+ */ +export interface Entitlement { + /** + *

The name of the entitlement.

+ */ + Name: string | undefined; + + /** + *

The name of the stack with which the entitlement is associated.

+ */ + StackName: string | undefined; + + /** + *

The description of the entitlement.

+ */ + Description?: string; + + /** + *

Specifies whether all or selected apps are entitled.

+ */ + AppVisibility: AppVisibility | string | undefined; + + /** + *

The attributes of the entitlement.

+ */ + Attributes: EntitlementAttribute[] | undefined; + + /** + *

The time when the entitlement was created.

+ */ + CreatedTime?: Date; + + /** + *

The time when the entitlement was last modified.

+ */ + LastModifiedTime?: Date; +} + +export namespace Entitlement { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Entitlement): any => ({ + ...obj, + }); +} + +export interface CreateEntitlementResult { + /** + *

The entitlement.

+ */ + Entitlement?: Entitlement; +} + +export namespace CreateEntitlementResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEntitlementResult): any => ({ + ...obj, + }); +} + +/** + *

The entitlement already exists.

+ */ +export interface EntitlementAlreadyExistsException extends __SmithyException, $MetadataBearer { + name: "EntitlementAlreadyExistsException"; + $fault: "client"; + /** + *

The error message in the exception.

+ */ + Message?: string; +} + +export namespace EntitlementAlreadyExistsException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EntitlementAlreadyExistsException): any => ({ + ...obj, + }); +} + /** *
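As a usage illustration (not part of the diff), the sketch below creates an entitlement whose attributes must match a SAML PrincipalTag. The stack and entitlement names are placeholders, and the ASSOCIATED visibility value is taken from the AppStream documentation rather than from this change set.

```ts
import { AppStreamClient, CreateEntitlementCommand } from "@aws-sdk/client-appstream";

// Create an entitlement that entitles users whose SAML session tag
// "department" equals "Finance" to the applications later associated with it.
export async function createFinanceEntitlement(client: AppStreamClient): Promise<void> {
  try {
    const { Entitlement } = await client.send(
      new CreateEntitlementCommand({
        Name: "FinanceApps",            // placeholder entitlement name
        StackName: "ExampleStack",      // placeholder stack name
        Description: "Finance department applications",
        AppVisibility: "ASSOCIATED",    // only explicitly associated apps are visible
        Attributes: [{ Name: "department", Value: "Finance" }],
      })
    );
    console.log("Created entitlement:", Entitlement?.Name);
  } catch (err) {
    if ((err as { name?: string }).name === "EntitlementAlreadyExistsException") {
      console.log("Entitlement already exists; nothing to do.");
    } else {
      throw err;
    }
  }
}
```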

Describes the configuration information required to join fleets and image builders to Microsoft Active Directory domains.

*/ @@ -3116,6 +3360,38 @@ export namespace DeleteDirectoryConfigResult { }); } +export interface DeleteEntitlementRequest { + /** + *

The name of the entitlement.

+ */ + Name: string | undefined; + + /** + *

The name of the stack with which the entitlement is associated.

+ */ + StackName: string | undefined; +} + +export namespace DeleteEntitlementRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEntitlementRequest): any => ({ + ...obj, + }); +} + +export interface DeleteEntitlementResult {} + +export namespace DeleteEntitlementResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEntitlementResult): any => ({ + ...obj, + }); +} + export interface DeleteFleetRequest { /** *
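A small sketch (not part of the diff) of an idempotent cleanup step built on DeleteEntitlement; the EntitlementNotFoundException case appears in the error switch added to the deserializer later in this change.

```ts
import { AppStreamClient, DeleteEntitlementCommand } from "@aws-sdk/client-appstream";

// Delete an entitlement, treating "already deleted" as success so the
// cleanup step can be re-run safely.
export async function deleteEntitlementIfPresent(
  client: AppStreamClient,
  stackName: string,
  name: string
): Promise<void> {
  try {
    await client.send(new DeleteEntitlementCommand({ Name: name, StackName: stackName }));
  } catch (err) {
    if ((err as { name?: string }).name !== "EntitlementNotFoundException") {
      throw err;
    }
  }
}
```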

The name of the fleet.

@@ -3527,6 +3803,59 @@ export namespace DescribeDirectoryConfigsResult { }); } +export interface DescribeEntitlementsRequest { + /** + *

The name of the entitlement.

+ */ + Name?: string; + + /** + *

The name of the stack with which the entitlement is associated.

+ */ + StackName: string | undefined; + + /** + *

The pagination token used to retrieve the next page of results for this operation.

+ */ + NextToken?: string; + + /** + *

The maximum size of each page of results.

+ */ + MaxResults?: number; +} + +export namespace DescribeEntitlementsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeEntitlementsRequest): any => ({ + ...obj, + }); +} + +export interface DescribeEntitlementsResult { + /** + *

The entitlements.

+ */ + Entitlements?: Entitlement[]; + + /** + *

The pagination token used to retrieve the next page of results for this operation.

+ */ + NextToken?: string; +} + +export namespace DescribeEntitlementsResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeEntitlementsResult): any => ({ + ...obj, + }); +} + export interface DescribeFleetsRequest { /** *
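The request/result pair above is paginated through NextToken and MaxResults. A minimal sketch of manual pagination follows (illustrative only; no paginator helper appears in this portion of the diff, and the model types are assumed to be re-exported from the package root as usual for these clients).

```ts
import { AppStreamClient, DescribeEntitlementsCommand, Entitlement } from "@aws-sdk/client-appstream";

// Collect every entitlement on a stack by following NextToken until the
// service stops returning one. The stack name is supplied by the caller.
export async function listAllEntitlements(client: AppStreamClient, stackName: string): Promise<Entitlement[]> {
  const entitlements: Entitlement[] = [];
  let nextToken: string | undefined;

  do {
    const page = await client.send(
      new DescribeEntitlementsCommand({ StackName: stackName, MaxResults: 25, NextToken: nextToken })
    );
    entitlements.push(...(page.Entitlements ?? []));
    nextToken = page.NextToken;
  } while (nextToken);

  return entitlements;
}
```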

The names of the fleets to describe.

@@ -4308,6 +4637,43 @@ export namespace DisassociateApplicationFleetResult { }); } +export interface DisassociateApplicationFromEntitlementRequest { + /** + *

The name of the stack with which the entitlement is associated.

+ */ + StackName: string | undefined; + + /** + *

The name of the entitlement.

+ */ + EntitlementName: string | undefined; + + /** + *

The identifier of the application to remove from the entitlement.

+ */ + ApplicationIdentifier: string | undefined; +} + +export namespace DisassociateApplicationFromEntitlementRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateApplicationFromEntitlementRequest): any => ({ + ...obj, + }); +} + +export interface DisassociateApplicationFromEntitlementResult {} + +export namespace DisassociateApplicationFromEntitlementResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateApplicationFromEntitlementResult): any => ({ + ...obj, + }); +} + export interface DisassociateFleetRequest { /** *
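For symmetry with the associate sketch earlier, a hedged example of removing an application from an entitlement (placeholder names throughout):

```ts
import { AppStreamClient, DisassociateApplicationFromEntitlementCommand } from "@aws-sdk/client-appstream";

// Remove a single application from an entitlement. The result shape is empty,
// so there is nothing to inspect on success.
export async function removeApplicationFromEntitlement(client: AppStreamClient): Promise<void> {
  await client.send(
    new DisassociateApplicationFromEntitlementCommand({
      StackName: "ExampleStack",        // placeholder stack name
      EntitlementName: "FinanceApps",   // placeholder entitlement name
      ApplicationIdentifier: "example-finance-app",
    })
  );
}
```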

The name of the fleet.

@@ -4377,6 +4743,25 @@ export namespace EnableUserResult { }); } +/** + *

The application associated with an entitlement. Access is controlled based on user attributes.

+ */ +export interface EntitledApplication { + /** + *

The identifier of the application.

+ */ + ApplicationIdentifier: string | undefined; +} + +export namespace EntitledApplication { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EntitledApplication): any => ({ + ...obj, + }); +} + export interface ExpireSessionRequest { /** *

The identifier of the streaming session.

@@ -4496,6 +4881,58 @@ export namespace ListAssociatedStacksResult { }); } +export interface ListEntitledApplicationsRequest { + /** + *

The name of the stack with which the entitlement is associated.

+ */ + StackName: string | undefined; + + /** + *

The name of the entitlement.

+ */ + EntitlementName: string | undefined; + + /** + *

The pagination token used to retrieve the next page of results for this operation.

+ */ + NextToken?: string; + + /** + *

The maximum size of each page of results.

+ */ + MaxResults?: number; +} + +export namespace ListEntitledApplicationsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListEntitledApplicationsRequest): any => ({ + ...obj, + }); +} + +export interface ListEntitledApplicationsResult { + /** + *

The entitled applications.

+ */ + EntitledApplications?: EntitledApplication[]; + + /** + *

The pagination token used to retrieve the next page of results for this operation.

+ */ + NextToken?: string; +} + +export namespace ListEntitledApplicationsResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListEntitledApplicationsResult): any => ({ + ...obj, + }); +} + export interface ListTagsForResourceRequest { /** *
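A short sketch (illustrative, placeholder names) that prints the identifiers returned by the new ListEntitledApplications operation:

```ts
import { AppStreamClient, ListEntitledApplicationsCommand } from "@aws-sdk/client-appstream";

// Print the identifier of every application currently entitled under one
// entitlement. For brevity this reads only the first page of results.
export async function printEntitledApplications(client: AppStreamClient): Promise<void> {
  const { EntitledApplications = [] } = await client.send(
    new ListEntitledApplicationsCommand({
      StackName: "ExampleStack",        // placeholder stack name
      EntitlementName: "FinanceApps",   // placeholder entitlement name
    })
  );
  for (const app of EntitledApplications) {
    console.log(app.ApplicationIdentifier);
  }
}
```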

The Amazon Resource Name (ARN) of the resource.

@@ -4838,6 +5275,58 @@ export namespace UpdateDirectoryConfigResult { }); } +export interface UpdateEntitlementRequest { + /** + *

The name of the entitlement.

+ */ + Name: string | undefined; + + /** + *

The name of the stack with which the entitlement is associated.

+ */ + StackName: string | undefined; + + /** + *

The description of the entitlement.

+ */ + Description?: string; + + /** + *

Specifies whether all or only selected apps are entitled.

+ */ + AppVisibility?: AppVisibility | string; + + /** + *

The attributes of the entitlement.

+ */ + Attributes?: EntitlementAttribute[]; +} + +export namespace UpdateEntitlementRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEntitlementRequest): any => ({ + ...obj, + }); +} + +export interface UpdateEntitlementResult { + /** + *

The entitlement.

+ */ + Entitlement?: Entitlement; +} + +export namespace UpdateEntitlementResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEntitlementResult): any => ({ + ...obj, + }); +} + export interface UpdateFleetRequest { /** *
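Finally, a hedged sketch of UpdateEntitlement. The attribute values and the ALL visibility value are illustrative; they come from the AppStream documentation rather than from this diff.

```ts
import { AppStreamClient, UpdateEntitlementCommand } from "@aws-sdk/client-appstream";

// Update an existing entitlement so that it entitles all applications on the
// stack and matches an additional SAML attribute.
export async function widenEntitlement(client: AppStreamClient): Promise<void> {
  const { Entitlement } = await client.send(
    new UpdateEntitlementCommand({
      Name: "FinanceApps",          // placeholder entitlement name
      StackName: "ExampleStack",    // placeholder stack name
      AppVisibility: "ALL",
      Attributes: [
        { Name: "department", Value: "Finance" },
        { Name: "costCenter", Value: "1234" },
      ],
    })
  );
  console.log("Last modified:", Entitlement?.LastModifiedTime);
}
```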

The name of the image used to create the fleet.

diff --git a/clients/client-appstream/src/protocols/Aws_json1_1.ts b/clients/client-appstream/src/protocols/Aws_json1_1.ts index 14fec1b8dfac..c3b7acc91da0 100644 --- a/clients/client-appstream/src/protocols/Aws_json1_1.ts +++ b/clients/client-appstream/src/protocols/Aws_json1_1.ts @@ -20,6 +20,10 @@ import { AssociateApplicationFleetCommandInput, AssociateApplicationFleetCommandOutput, } from "../commands/AssociateApplicationFleetCommand"; +import { + AssociateApplicationToEntitlementCommandInput, + AssociateApplicationToEntitlementCommandOutput, +} from "../commands/AssociateApplicationToEntitlementCommand"; import { AssociateFleetCommandInput, AssociateFleetCommandOutput } from "../commands/AssociateFleetCommand"; import { BatchAssociateUserStackCommandInput, @@ -36,6 +40,7 @@ import { CreateDirectoryConfigCommandInput, CreateDirectoryConfigCommandOutput, } from "../commands/CreateDirectoryConfigCommand"; +import { CreateEntitlementCommandInput, CreateEntitlementCommandOutput } from "../commands/CreateEntitlementCommand"; import { CreateFleetCommandInput, CreateFleetCommandOutput } from "../commands/CreateFleetCommand"; import { CreateImageBuilderCommandInput, CreateImageBuilderCommandOutput } from "../commands/CreateImageBuilderCommand"; import { @@ -56,6 +61,7 @@ import { DeleteDirectoryConfigCommandInput, DeleteDirectoryConfigCommandOutput, } from "../commands/DeleteDirectoryConfigCommand"; +import { DeleteEntitlementCommandInput, DeleteEntitlementCommandOutput } from "../commands/DeleteEntitlementCommand"; import { DeleteFleetCommandInput, DeleteFleetCommandOutput } from "../commands/DeleteFleetCommand"; import { DeleteImageBuilderCommandInput, DeleteImageBuilderCommandOutput } from "../commands/DeleteImageBuilderCommand"; import { DeleteImageCommandInput, DeleteImageCommandOutput } from "../commands/DeleteImageCommand"; @@ -82,6 +88,10 @@ import { DescribeDirectoryConfigsCommandInput, DescribeDirectoryConfigsCommandOutput, } from "../commands/DescribeDirectoryConfigsCommand"; +import { + DescribeEntitlementsCommandInput, + DescribeEntitlementsCommandOutput, +} from "../commands/DescribeEntitlementsCommand"; import { DescribeFleetsCommandInput, DescribeFleetsCommandOutput } from "../commands/DescribeFleetsCommand"; import { DescribeImageBuildersCommandInput, @@ -108,6 +118,10 @@ import { DisassociateApplicationFleetCommandInput, DisassociateApplicationFleetCommandOutput, } from "../commands/DisassociateApplicationFleetCommand"; +import { + DisassociateApplicationFromEntitlementCommandInput, + DisassociateApplicationFromEntitlementCommandOutput, +} from "../commands/DisassociateApplicationFromEntitlementCommand"; import { DisassociateFleetCommandInput, DisassociateFleetCommandOutput } from "../commands/DisassociateFleetCommand"; import { EnableUserCommandInput, EnableUserCommandOutput } from "../commands/EnableUserCommand"; import { ExpireSessionCommandInput, ExpireSessionCommandOutput } from "../commands/ExpireSessionCommand"; @@ -119,6 +133,10 @@ import { ListAssociatedStacksCommandInput, ListAssociatedStacksCommandOutput, } from "../commands/ListAssociatedStacksCommand"; +import { + ListEntitledApplicationsCommandInput, + ListEntitledApplicationsCommandOutput, +} from "../commands/ListEntitledApplicationsCommand"; import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, @@ -134,6 +152,7 @@ import { UpdateDirectoryConfigCommandInput, UpdateDirectoryConfigCommandOutput, } from "../commands/UpdateDirectoryConfigCommand"; +import { UpdateEntitlementCommandInput, 
UpdateEntitlementCommandOutput } from "../commands/UpdateEntitlementCommand"; import { UpdateFleetCommandInput, UpdateFleetCommandOutput } from "../commands/UpdateFleetCommand"; import { UpdateImagePermissionsCommandInput, @@ -150,6 +169,8 @@ import { ApplicationSettingsResponse, AssociateApplicationFleetRequest, AssociateApplicationFleetResult, + AssociateApplicationToEntitlementRequest, + AssociateApplicationToEntitlementResult, AssociateFleetRequest, AssociateFleetResult, BatchAssociateUserStackRequest, @@ -167,6 +188,8 @@ import { CreateApplicationResult, CreateDirectoryConfigRequest, CreateDirectoryConfigResult, + CreateEntitlementRequest, + CreateEntitlementResult, CreateFleetRequest, CreateFleetResult, CreateImageBuilderRequest, @@ -189,6 +212,8 @@ import { DeleteApplicationResult, DeleteDirectoryConfigRequest, DeleteDirectoryConfigResult, + DeleteEntitlementRequest, + DeleteEntitlementResult, DeleteFleetRequest, DeleteFleetResult, DeleteImageBuilderRequest, @@ -211,6 +236,8 @@ import { DescribeApplicationsResult, DescribeDirectoryConfigsRequest, DescribeDirectoryConfigsResult, + DescribeEntitlementsRequest, + DescribeEntitlementsResult, DescribeFleetsRequest, DescribeFleetsResult, DescribeImageBuildersRequest, @@ -234,11 +261,18 @@ import { DisableUserResult, DisassociateApplicationFleetRequest, DisassociateApplicationFleetResult, + DisassociateApplicationFromEntitlementRequest, + DisassociateApplicationFromEntitlementResult, DisassociateFleetRequest, DisassociateFleetResult, DomainJoinInfo, EnableUserRequest, EnableUserResult, + EntitledApplication, + Entitlement, + EntitlementAlreadyExistsException, + EntitlementAttribute, + EntitlementNotFoundException, ExpireSessionRequest, ExpireSessionResult, Fleet, @@ -259,6 +293,8 @@ import { ListAssociatedFleetsResult, ListAssociatedStacksRequest, ListAssociatedStacksResult, + ListEntitledApplicationsRequest, + ListEntitledApplicationsResult, ListTagsForResourceRequest, ListTagsForResourceResponse, NetworkAccessConfiguration, @@ -295,6 +331,8 @@ import { UpdateApplicationResult, UpdateDirectoryConfigRequest, UpdateDirectoryConfigResult, + UpdateEntitlementRequest, + UpdateEntitlementResult, UpdateFleetRequest, UpdateFleetResult, UpdateImagePermissionsRequest, @@ -322,6 +360,19 @@ export const serializeAws_json1_1AssociateApplicationFleetCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1AssociateApplicationToEntitlementCommand = async ( + input: AssociateApplicationToEntitlementCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "PhotonAdminProxyService.AssociateApplicationToEntitlement", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1AssociateApplicationToEntitlementRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1AssociateFleetCommand = async ( input: AssociateFleetCommandInput, context: __SerdeContext @@ -413,6 +464,19 @@ export const serializeAws_json1_1CreateDirectoryConfigCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1CreateEntitlementCommand = async ( + input: CreateEntitlementCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": 
"PhotonAdminProxyService.CreateEntitlement", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1CreateEntitlementRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1CreateFleetCommand = async ( input: CreateFleetCommandInput, context: __SerdeContext @@ -556,6 +620,19 @@ export const serializeAws_json1_1DeleteDirectoryConfigCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DeleteEntitlementCommand = async ( + input: DeleteEntitlementCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "PhotonAdminProxyService.DeleteEntitlement", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeleteEntitlementRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DeleteFleetCommand = async ( input: DeleteFleetCommandInput, context: __SerdeContext @@ -699,6 +776,19 @@ export const serializeAws_json1_1DescribeDirectoryConfigsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DescribeEntitlementsCommand = async ( + input: DescribeEntitlementsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "PhotonAdminProxyService.DescribeEntitlements", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DescribeEntitlementsRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeFleetsCommand = async ( input: DescribeFleetsCommandInput, context: __SerdeContext @@ -842,6 +932,19 @@ export const serializeAws_json1_1DisassociateApplicationFleetCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DisassociateApplicationFromEntitlementCommand = async ( + input: DisassociateApplicationFromEntitlementCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "PhotonAdminProxyService.DisassociateApplicationFromEntitlement", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DisassociateApplicationFromEntitlementRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DisassociateFleetCommand = async ( input: DisassociateFleetCommandInput, context: __SerdeContext @@ -907,6 +1010,19 @@ export const serializeAws_json1_1ListAssociatedStacksCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ListEntitledApplicationsCommand = async ( + input: ListEntitledApplicationsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "PhotonAdminProxyService.ListEntitledApplications", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1ListEntitledApplicationsRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListTagsForResourceCommand 
= async ( input: ListTagsForResourceCommandInput, context: __SerdeContext @@ -1024,6 +1140,19 @@ export const serializeAws_json1_1UpdateDirectoryConfigCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1UpdateEntitlementCommand = async ( + input: UpdateEntitlementCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "PhotonAdminProxyService.UpdateEntitlement", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1UpdateEntitlementRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1UpdateFleetCommand = async ( input: UpdateFleetCommandInput, context: __SerdeContext @@ -1149,6 +1278,84 @@ const deserializeAws_json1_1AssociateApplicationFleetCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1AssociateApplicationToEntitlementCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1AssociateApplicationToEntitlementCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1AssociateApplicationToEntitlementResult(data, context); + const response: AssociateApplicationToEntitlementCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1AssociateApplicationToEntitlementCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntitlementNotFoundException": + case "com.amazonaws.appstream#EntitlementNotFoundException": + response = { + ...(await deserializeAws_json1_1EntitlementNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "LimitExceededException": + case "com.amazonaws.appstream#LimitExceededException": + response = { + ...(await deserializeAws_json1_1LimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.appstream#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + 
} + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1AssociateFleetCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1719,6 +1926,84 @@ const deserializeAws_json1_1CreateDirectoryConfigCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1CreateEntitlementCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1CreateEntitlementCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1CreateEntitlementResult(data, context); + const response: CreateEntitlementCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1CreateEntitlementCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntitlementAlreadyExistsException": + case "com.amazonaws.appstream#EntitlementAlreadyExistsException": + response = { + ...(await deserializeAws_json1_1EntitlementAlreadyExistsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "LimitExceededException": + case "com.amazonaws.appstream#LimitExceededException": + response = { + ...(await deserializeAws_json1_1LimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.appstream#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1CreateFleetCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2697,27 +2982,27 @@ const deserializeAws_json1_1DeleteDirectoryConfigCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DeleteFleetCommand = async ( +export const 
deserializeAws_json1_1DeleteEntitlementCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DeleteFleetCommandError(output, context); + return deserializeAws_json1_1DeleteEntitlementCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DeleteFleetResult(data, context); - const response: DeleteFleetCommandOutput = { + contents = deserializeAws_json1_1DeleteEntitlementResult(data, context); + const response: DeleteEntitlementCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DeleteFleetCommandError = async ( +const deserializeAws_json1_1DeleteEntitlementCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2734,10 +3019,18 @@ const deserializeAws_json1_1DeleteFleetCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "ResourceInUseException": - case "com.amazonaws.appstream#ResourceInUseException": + case "EntitlementNotFoundException": + case "com.amazonaws.appstream#EntitlementNotFoundException": response = { - ...(await deserializeAws_json1_1ResourceInUseExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EntitlementNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2767,27 +3060,27 @@ const deserializeAws_json1_1DeleteFleetCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DeleteImageCommand = async ( +export const deserializeAws_json1_1DeleteFleetCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DeleteImageCommandError(output, context); + return deserializeAws_json1_1DeleteFleetCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DeleteImageResult(data, context); - const response: DeleteImageCommandOutput = { + contents = deserializeAws_json1_1DeleteFleetResult(data, context); + const response: DeleteFleetCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DeleteImageCommandError = async ( +const deserializeAws_json1_1DeleteFleetCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2804,14 +3097,6 @@ const deserializeAws_json1_1DeleteImageCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "OperationNotPermittedException": - case "com.amazonaws.appstream#OperationNotPermittedException": - response = { - ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: 
deserializeMetadata(output), - }; - break; case "ResourceInUseException": case "com.amazonaws.appstream#ResourceInUseException": response = { @@ -2845,27 +3130,105 @@ const deserializeAws_json1_1DeleteImageCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DeleteImageBuilderCommand = async ( +export const deserializeAws_json1_1DeleteImageCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DeleteImageBuilderCommandError(output, context); + return deserializeAws_json1_1DeleteImageCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DeleteImageBuilderResult(data, context); - const response: DeleteImageBuilderCommandOutput = { + contents = deserializeAws_json1_1DeleteImageResult(data, context); + const response: DeleteImageCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DeleteImageBuilderCommandError = async ( +const deserializeAws_json1_1DeleteImageCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConcurrentModificationException": + case "com.amazonaws.appstream#ConcurrentModificationException": + response = { + ...(await deserializeAws_json1_1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceInUseException": + case "com.amazonaws.appstream#ResourceInUseException": + response = { + ...(await deserializeAws_json1_1ResourceInUseExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.appstream#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1DeleteImageBuilderCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeleteImageBuilderCommandError(output, context); + } + 
const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DeleteImageBuilderResult(data, context); + const response: DeleteImageBuilderCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DeleteImageBuilderCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -3014,6 +3377,14 @@ const deserializeAws_json1_1DeleteStackCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "ResourceInUseException": case "com.amazonaws.appstream#ResourceInUseException": response = { @@ -3403,6 +3774,76 @@ const deserializeAws_json1_1DescribeDirectoryConfigsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1DescribeEntitlementsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DescribeEntitlementsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DescribeEntitlementsResult(data, context); + const response: DescribeEntitlementsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DescribeEntitlementsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntitlementNotFoundException": + case "com.amazonaws.appstream#EntitlementNotFoundException": + response = { + ...(await deserializeAws_json1_1EntitlementNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.appstream#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete 
response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1DescribeFleetsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -4053,6 +4494,76 @@ const deserializeAws_json1_1DisassociateApplicationFleetCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1DisassociateApplicationFromEntitlementCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DisassociateApplicationFromEntitlementCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DisassociateApplicationFromEntitlementResult(data, context); + const response: DisassociateApplicationFromEntitlementCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DisassociateApplicationFromEntitlementCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntitlementNotFoundException": + case "com.amazonaws.appstream#EntitlementNotFoundException": + response = { + ...(await deserializeAws_json1_1EntitlementNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.appstream#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1DisassociateFleetCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -4331,6 +4842,76 @@ const deserializeAws_json1_1ListAssociatedStacksCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1ListEntitledApplicationsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1ListEntitledApplicationsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = 
deserializeAws_json1_1ListEntitledApplicationsResult(data, context); + const response: ListEntitledApplicationsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1ListEntitledApplicationsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntitlementNotFoundException": + case "com.amazonaws.appstream#EntitlementNotFoundException": + response = { + ...(await deserializeAws_json1_1EntitlementNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.appstream#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1ListTagsForResourceCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -4837,27 +5418,97 @@ const deserializeAws_json1_1UntagResourceCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1UpdateApplicationCommand = async ( +export const deserializeAws_json1_1UpdateApplicationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1UpdateApplicationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1UpdateApplicationResult(data, context); + const response: UpdateApplicationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1UpdateApplicationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConcurrentModificationException": + case "com.amazonaws.appstream#ConcurrentModificationException": + response = { + ...(await 
deserializeAws_json1_1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.appstream#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.appstream#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1UpdateDirectoryConfigCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1UpdateApplicationCommandError(output, context); + return deserializeAws_json1_1UpdateDirectoryConfigCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1UpdateApplicationResult(data, context); - const response: UpdateApplicationCommandOutput = { + contents = deserializeAws_json1_1UpdateDirectoryConfigResult(data, context); + const response: UpdateDirectoryConfigCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1UpdateApplicationCommandError = async ( +const deserializeAws_json1_1UpdateDirectoryConfigCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -4874,6 +5525,14 @@ const deserializeAws_json1_1UpdateApplicationCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "InvalidRoleException": + case "com.amazonaws.appstream#InvalidRoleException": + response = { + ...(await deserializeAws_json1_1InvalidRoleExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "OperationNotPermittedException": case "com.amazonaws.appstream#OperationNotPermittedException": response = { @@ -4882,6 +5541,14 @@ const deserializeAws_json1_1UpdateApplicationCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceInUseException": + case "com.amazonaws.appstream#ResourceInUseException": + response = { + ...(await deserializeAws_json1_1ResourceInUseExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "ResourceNotFoundException": case "com.amazonaws.appstream#ResourceNotFoundException": response = { @@ -4907,27 +5574,27 @@ const deserializeAws_json1_1UpdateApplicationCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; 
-export const deserializeAws_json1_1UpdateDirectoryConfigCommand = async ( +export const deserializeAws_json1_1UpdateEntitlementCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1UpdateDirectoryConfigCommandError(output, context); + return deserializeAws_json1_1UpdateEntitlementCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1UpdateDirectoryConfigResult(data, context); - const response: UpdateDirectoryConfigCommandOutput = { + contents = deserializeAws_json1_1UpdateEntitlementResult(data, context); + const response: UpdateEntitlementCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1UpdateDirectoryConfigCommandError = async ( +const deserializeAws_json1_1UpdateEntitlementCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -4944,10 +5611,10 @@ const deserializeAws_json1_1UpdateDirectoryConfigCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "InvalidRoleException": - case "com.amazonaws.appstream#InvalidRoleException": + case "EntitlementNotFoundException": + case "com.amazonaws.appstream#EntitlementNotFoundException": response = { - ...(await deserializeAws_json1_1InvalidRoleExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EntitlementNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -4960,14 +5627,6 @@ const deserializeAws_json1_1UpdateDirectoryConfigCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "ResourceInUseException": - case "com.amazonaws.appstream#ResourceInUseException": - response = { - ...(await deserializeAws_json1_1ResourceInUseExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "ResourceNotFoundException": case "com.amazonaws.appstream#ResourceNotFoundException": response = { @@ -5330,6 +5989,36 @@ const deserializeAws_json1_1ConcurrentModificationExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1EntitlementAlreadyExistsExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1EntitlementAlreadyExistsException(body, context); + const contents: EntitlementAlreadyExistsException = { + name: "EntitlementAlreadyExistsException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1EntitlementNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1EntitlementNotFoundException(body, context); + const contents: EntitlementNotFoundException = { + name: "EntitlementNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1IncompatibleImageExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -5556,6 +6245,19 @@ const 
serializeAws_json1_1AssociateApplicationFleetRequest = ( }; }; +const serializeAws_json1_1AssociateApplicationToEntitlementRequest = ( + input: AssociateApplicationToEntitlementRequest, + context: __SerdeContext +): any => { + return { + ...(input.ApplicationIdentifier !== undefined && + input.ApplicationIdentifier !== null && { ApplicationIdentifier: input.ApplicationIdentifier }), + ...(input.EntitlementName !== undefined && + input.EntitlementName !== null && { EntitlementName: input.EntitlementName }), + ...(input.StackName !== undefined && input.StackName !== null && { StackName: input.StackName }), + }; +}; + const serializeAws_json1_1AssociateFleetRequest = (input: AssociateFleetRequest, context: __SerdeContext): any => { return { ...(input.FleetName !== undefined && input.FleetName !== null && { FleetName: input.FleetName }), @@ -5686,6 +6388,22 @@ const serializeAws_json1_1CreateDirectoryConfigRequest = ( }; }; +const serializeAws_json1_1CreateEntitlementRequest = ( + input: CreateEntitlementRequest, + context: __SerdeContext +): any => { + return { + ...(input.AppVisibility !== undefined && input.AppVisibility !== null && { AppVisibility: input.AppVisibility }), + ...(input.Attributes !== undefined && + input.Attributes !== null && { + Attributes: serializeAws_json1_1EntitlementAttributeList(input.Attributes, context), + }), + ...(input.Description !== undefined && input.Description !== null && { Description: input.Description }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.StackName !== undefined && input.StackName !== null && { StackName: input.StackName }), + }; +}; + const serializeAws_json1_1CreateFleetRequest = (input: CreateFleetRequest, context: __SerdeContext): any => { return { ...(input.ComputeCapacity !== undefined && @@ -5874,6 +6592,16 @@ const serializeAws_json1_1DeleteDirectoryConfigRequest = ( }; }; +const serializeAws_json1_1DeleteEntitlementRequest = ( + input: DeleteEntitlementRequest, + context: __SerdeContext +): any => { + return { + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.StackName !== undefined && input.StackName !== null && { StackName: input.StackName }), + }; +}; + const serializeAws_json1_1DeleteFleetRequest = (input: DeleteFleetRequest, context: __SerdeContext): any => { return { ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), @@ -5976,6 +6704,18 @@ const serializeAws_json1_1DescribeDirectoryConfigsRequest = ( }; }; +const serializeAws_json1_1DescribeEntitlementsRequest = ( + input: DescribeEntitlementsRequest, + context: __SerdeContext +): any => { + return { + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.StackName !== undefined && input.StackName !== null && { StackName: input.StackName }), + }; +}; + const serializeAws_json1_1DescribeFleetsRequest = (input: DescribeFleetsRequest, context: __SerdeContext): any => { return { ...(input.Names !== undefined && @@ -6105,6 +6845,19 @@ const serializeAws_json1_1DisassociateApplicationFleetRequest = ( }; }; +const serializeAws_json1_1DisassociateApplicationFromEntitlementRequest = ( + input: DisassociateApplicationFromEntitlementRequest, + context: __SerdeContext +): any => { + return { + ...(input.ApplicationIdentifier !== undefined && 
+ input.ApplicationIdentifier !== null && { ApplicationIdentifier: input.ApplicationIdentifier }), + ...(input.EntitlementName !== undefined && + input.EntitlementName !== null && { EntitlementName: input.EntitlementName }), + ...(input.StackName !== undefined && input.StackName !== null && { StackName: input.StackName }), + }; +}; + const serializeAws_json1_1DisassociateFleetRequest = ( input: DisassociateFleetRequest, context: __SerdeContext @@ -6155,6 +6908,24 @@ const serializeAws_json1_1EnableUserRequest = (input: EnableUserRequest, context }; }; +const serializeAws_json1_1EntitlementAttribute = (input: EntitlementAttribute, context: __SerdeContext): any => { + return { + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.Value !== undefined && input.Value !== null && { Value: input.Value }), + }; +}; + +const serializeAws_json1_1EntitlementAttributeList = (input: EntitlementAttribute[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1EntitlementAttribute(entry, context); + }); +}; + const serializeAws_json1_1ExpireSessionRequest = (input: ExpireSessionRequest, context: __SerdeContext): any => { return { ...(input.SessionId !== undefined && input.SessionId !== null && { SessionId: input.SessionId }), @@ -6200,6 +6971,19 @@ const serializeAws_json1_1ListAssociatedStacksRequest = ( }; }; +const serializeAws_json1_1ListEntitledApplicationsRequest = ( + input: ListEntitledApplicationsRequest, + context: __SerdeContext +): any => { + return { + ...(input.EntitlementName !== undefined && + input.EntitlementName !== null && { EntitlementName: input.EntitlementName }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.StackName !== undefined && input.StackName !== null && { StackName: input.StackName }), + }; +}; + const serializeAws_json1_1ListTagsForResourceRequest = ( input: ListTagsForResourceRequest, context: __SerdeContext @@ -6447,6 +7231,22 @@ const serializeAws_json1_1UpdateDirectoryConfigRequest = ( }; }; +const serializeAws_json1_1UpdateEntitlementRequest = ( + input: UpdateEntitlementRequest, + context: __SerdeContext +): any => { + return { + ...(input.AppVisibility !== undefined && input.AppVisibility !== null && { AppVisibility: input.AppVisibility }), + ...(input.Attributes !== undefined && + input.Attributes !== null && { + Attributes: serializeAws_json1_1EntitlementAttributeList(input.Attributes, context), + }), + ...(input.Description !== undefined && input.Description !== null && { Description: input.Description }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.StackName !== undefined && input.StackName !== null && { StackName: input.StackName }), + }; +}; + const serializeAws_json1_1UpdateFleetRequest = (input: UpdateFleetRequest, context: __SerdeContext): any => { return { ...(input.AttributesToDelete !== undefined && @@ -6749,6 +7549,13 @@ const deserializeAws_json1_1AssociateApplicationFleetResult = ( } as any; }; +const deserializeAws_json1_1AssociateApplicationToEntitlementResult = ( + output: any, + context: __SerdeContext +): AssociateApplicationToEntitlementResult => { + return {} as any; +}; + const deserializeAws_json1_1AssociateFleetResult = (output: any, context: __SerdeContext): 
AssociateFleetResult => { return {} as any; }; @@ -6834,6 +7641,18 @@ const deserializeAws_json1_1CreateDirectoryConfigResult = ( } as any; }; +const deserializeAws_json1_1CreateEntitlementResult = ( + output: any, + context: __SerdeContext +): CreateEntitlementResult => { + return { + Entitlement: + output.Entitlement !== undefined && output.Entitlement !== null + ? deserializeAws_json1_1Entitlement(output.Entitlement, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1CreateFleetResult = (output: any, context: __SerdeContext): CreateFleetResult => { return { Fleet: @@ -6935,6 +7754,13 @@ const deserializeAws_json1_1DeleteDirectoryConfigResult = ( return {} as any; }; +const deserializeAws_json1_1DeleteEntitlementResult = ( + output: any, + context: __SerdeContext +): DeleteEntitlementResult => { + return {} as any; +}; + const deserializeAws_json1_1DeleteFleetResult = (output: any, context: __SerdeContext): DeleteFleetResult => { return {} as any; }; @@ -7034,6 +7860,19 @@ const deserializeAws_json1_1DescribeDirectoryConfigsResult = ( } as any; }; +const deserializeAws_json1_1DescribeEntitlementsResult = ( + output: any, + context: __SerdeContext +): DescribeEntitlementsResult => { + return { + Entitlements: + output.Entitlements !== undefined && output.Entitlements !== null + ? deserializeAws_json1_1EntitlementList(output.Entitlements, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + const deserializeAws_json1_1DescribeFleetsResult = (output: any, context: __SerdeContext): DescribeFleetsResult => { return { Fleets: @@ -7180,6 +8019,13 @@ const deserializeAws_json1_1DisassociateApplicationFleetResult = ( return {} as any; }; +const deserializeAws_json1_1DisassociateApplicationFromEntitlementResult = ( + output: any, + context: __SerdeContext +): DisassociateApplicationFromEntitlementResult => { + return {} as any; +}; + const deserializeAws_json1_1DisassociateFleetResult = ( output: any, context: __SerdeContext @@ -7220,6 +8066,94 @@ const deserializeAws_json1_1EnableUserResult = (output: any, context: __SerdeCon return {} as any; }; +const deserializeAws_json1_1EntitledApplication = (output: any, context: __SerdeContext): EntitledApplication => { + return { + ApplicationIdentifier: __expectString(output.ApplicationIdentifier), + } as any; +}; + +const deserializeAws_json1_1EntitledApplicationList = (output: any, context: __SerdeContext): EntitledApplication[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1EntitledApplication(entry, context); + }); +}; + +const deserializeAws_json1_1Entitlement = (output: any, context: __SerdeContext): Entitlement => { + return { + AppVisibility: __expectString(output.AppVisibility), + Attributes: + output.Attributes !== undefined && output.Attributes !== null + ? deserializeAws_json1_1EntitlementAttributeList(output.Attributes, context) + : undefined, + CreatedTime: + output.CreatedTime !== undefined && output.CreatedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreatedTime))) + : undefined, + Description: __expectString(output.Description), + LastModifiedTime: + output.LastModifiedTime !== undefined && output.LastModifiedTime !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastModifiedTime))) + : undefined, + Name: __expectString(output.Name), + StackName: __expectString(output.StackName), + } as any; +}; + +const deserializeAws_json1_1EntitlementAlreadyExistsException = ( + output: any, + context: __SerdeContext +): EntitlementAlreadyExistsException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1EntitlementAttribute = (output: any, context: __SerdeContext): EntitlementAttribute => { + return { + Name: __expectString(output.Name), + Value: __expectString(output.Value), + } as any; +}; + +const deserializeAws_json1_1EntitlementAttributeList = ( + output: any, + context: __SerdeContext +): EntitlementAttribute[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1EntitlementAttribute(entry, context); + }); +}; + +const deserializeAws_json1_1EntitlementList = (output: any, context: __SerdeContext): Entitlement[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1Entitlement(entry, context); + }); +}; + +const deserializeAws_json1_1EntitlementNotFoundException = ( + output: any, + context: __SerdeContext +): EntitlementNotFoundException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1ExpireSessionResult = (output: any, context: __SerdeContext): ExpireSessionResult => { return {} as any; }; @@ -7518,6 +8452,19 @@ const deserializeAws_json1_1ListAssociatedStacksResult = ( } as any; }; +const deserializeAws_json1_1ListEntitledApplicationsResult = ( + output: any, + context: __SerdeContext +): ListEntitledApplicationsResult => { + return { + EntitledApplications: + output.EntitledApplications !== undefined && output.EntitledApplications !== null + ? deserializeAws_json1_1EntitledApplicationList(output.EntitledApplications, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + const deserializeAws_json1_1ListTagsForResourceResponse = ( output: any, context: __SerdeContext @@ -7932,6 +8879,18 @@ const deserializeAws_json1_1UpdateDirectoryConfigResult = ( } as any; }; +const deserializeAws_json1_1UpdateEntitlementResult = ( + output: any, + context: __SerdeContext +): UpdateEntitlementResult => { + return { + Entitlement: + output.Entitlement !== undefined && output.Entitlement !== null + ? deserializeAws_json1_1Entitlement(output.Entitlement, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1UpdateFleetResult = (output: any, context: __SerdeContext): UpdateFleetResult => { return { Fleet: diff --git a/clients/client-appsync/src/models/models_0.ts b/clients/client-appsync/src/models/models_0.ts index f62cfc9da1c5..8cebb4649ca9 100644 --- a/clients/client-appsync/src/models/models_0.ts +++ b/clients/client-appsync/src/models/models_0.ts @@ -1650,6 +1650,11 @@ export interface CreateFunctionRequest { * resolver is invoked.

*/ syncConfig?: SyncConfig; + + /** + *

The maximum batching size for a resolver.

+ */ + maxBatchSize?: number; } export namespace CreateFunctionRequest { @@ -1714,6 +1719,11 @@ export interface FunctionConfiguration { * resolver is invoked.

*/ syncConfig?: SyncConfig; + + /** + *

The maximum batching size for a resolver.

+ */ + maxBatchSize?: number; } export namespace FunctionConfiguration { @@ -2136,6 +2146,11 @@ export interface CreateResolverRequest { *

The caching configuration for the resolver.

*/ cachingConfig?: CachingConfig; + + /** + *

The maximum batching size for a resolver.
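To show where the new maxBatchSize member fits, here is a minimal sketch (not part of this changeset) of creating a resolver with batching enabled; the apiId, typeName, fieldName, and dataSourceName values are placeholders, and kind: "UNIT" is an assumption about the resolver kind batching is used with.

```ts
import { AppSyncClient, CreateResolverCommand } from "@aws-sdk/client-appsync";

const client = new AppSyncClient({ region: "us-east-1" }); // example region

const { resolver } = await client.send(
  new CreateResolverCommand({
    apiId: "EXAMPLE-api-id", // placeholder identifiers
    typeName: "Query",
    fieldName: "listPosts",
    dataSourceName: "PostsLambdaDataSource",
    kind: "UNIT",
    maxBatchSize: 10, // the new member added in this diff: maximum batching size for the resolver
  })
);
```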

+ */ + maxBatchSize?: number; } export namespace CreateResolverRequest { @@ -2215,6 +2230,11 @@ export interface Resolver { *

The caching configuration for the resolver.

*/ cachingConfig?: CachingConfig; + + /** + *

The maximum batching size for a resolver.

+ */ + maxBatchSize?: number; } export namespace Resolver { @@ -3934,6 +3954,11 @@ export interface UpdateFunctionRequest { * resolver is invoked.

*/ syncConfig?: SyncConfig; + + /** + *

The maximum batching size for a resolver.

+ */ + maxBatchSize?: number; } export namespace UpdateFunctionRequest { @@ -4107,6 +4132,11 @@ export interface UpdateResolverRequest { *

The caching configuration for the resolver.

*/ cachingConfig?: CachingConfig; + + /** + *

The maximum batching size for a resolver.

+ */ + maxBatchSize?: number; } export namespace UpdateResolverRequest { diff --git a/clients/client-appsync/src/protocols/Aws_restJson1.ts b/clients/client-appsync/src/protocols/Aws_restJson1.ts index 875df7aa1e95..9a69a7595de9 100644 --- a/clients/client-appsync/src/protocols/Aws_restJson1.ts +++ b/clients/client-appsync/src/protocols/Aws_restJson1.ts @@ -353,6 +353,7 @@ export const serializeAws_restJson1CreateFunctionCommand = async ( ...(input.description !== undefined && input.description !== null && { description: input.description }), ...(input.functionVersion !== undefined && input.functionVersion !== null && { functionVersion: input.functionVersion }), + ...(input.maxBatchSize !== undefined && input.maxBatchSize !== null && { maxBatchSize: input.maxBatchSize }), ...(input.name !== undefined && input.name !== null && { name: input.name }), ...(input.requestMappingTemplate !== undefined && input.requestMappingTemplate !== null && { requestMappingTemplate: input.requestMappingTemplate }), @@ -460,6 +461,7 @@ export const serializeAws_restJson1CreateResolverCommand = async ( input.dataSourceName !== null && { dataSourceName: input.dataSourceName }), ...(input.fieldName !== undefined && input.fieldName !== null && { fieldName: input.fieldName }), ...(input.kind !== undefined && input.kind !== null && { kind: input.kind }), + ...(input.maxBatchSize !== undefined && input.maxBatchSize !== null && { maxBatchSize: input.maxBatchSize }), ...(input.pipelineConfig !== undefined && input.pipelineConfig !== null && { pipelineConfig: serializeAws_restJson1PipelineConfig(input.pipelineConfig, context), @@ -1828,6 +1830,7 @@ export const serializeAws_restJson1UpdateFunctionCommand = async ( ...(input.description !== undefined && input.description !== null && { description: input.description }), ...(input.functionVersion !== undefined && input.functionVersion !== null && { functionVersion: input.functionVersion }), + ...(input.maxBatchSize !== undefined && input.maxBatchSize !== null && { maxBatchSize: input.maxBatchSize }), ...(input.name !== undefined && input.name !== null && { name: input.name }), ...(input.requestMappingTemplate !== undefined && input.requestMappingTemplate !== null && { requestMappingTemplate: input.requestMappingTemplate }), @@ -1951,6 +1954,7 @@ export const serializeAws_restJson1UpdateResolverCommand = async ( ...(input.dataSourceName !== undefined && input.dataSourceName !== null && { dataSourceName: input.dataSourceName }), ...(input.kind !== undefined && input.kind !== null && { kind: input.kind }), + ...(input.maxBatchSize !== undefined && input.maxBatchSize !== null && { maxBatchSize: input.maxBatchSize }), ...(input.pipelineConfig !== undefined && input.pipelineConfig !== null && { pipelineConfig: serializeAws_restJson1PipelineConfig(input.pipelineConfig, context), @@ -6809,6 +6813,7 @@ const deserializeAws_restJson1FunctionConfiguration = (output: any, context: __S functionArn: __expectString(output.functionArn), functionId: __expectString(output.functionId), functionVersion: __expectString(output.functionVersion), + maxBatchSize: __expectInt32(output.maxBatchSize), name: __expectString(output.name), requestMappingTemplate: __expectString(output.requestMappingTemplate), responseMappingTemplate: __expectString(output.responseMappingTemplate), @@ -7013,6 +7018,7 @@ const deserializeAws_restJson1Resolver = (output: any, context: __SerdeContext): dataSourceName: __expectString(output.dataSourceName), fieldName: __expectString(output.fieldName), kind: 
__expectString(output.kind), + maxBatchSize: __expectInt32(output.maxBatchSize), pipelineConfig: output.pipelineConfig !== undefined && output.pipelineConfig !== null ? deserializeAws_restJson1PipelineConfig(output.pipelineConfig, context) diff --git a/clients/client-cloudtrail/src/CloudTrail.ts b/clients/client-cloudtrail/src/CloudTrail.ts index d5102696b068..6cb488a5072f 100644 --- a/clients/client-cloudtrail/src/CloudTrail.ts +++ b/clients/client-cloudtrail/src/CloudTrail.ts @@ -2,13 +2,34 @@ import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; import { CloudTrailClient } from "./CloudTrailClient"; import { AddTagsCommand, AddTagsCommandInput, AddTagsCommandOutput } from "./commands/AddTagsCommand"; +import { CancelQueryCommand, CancelQueryCommandInput, CancelQueryCommandOutput } from "./commands/CancelQueryCommand"; +import { + CreateEventDataStoreCommand, + CreateEventDataStoreCommandInput, + CreateEventDataStoreCommandOutput, +} from "./commands/CreateEventDataStoreCommand"; import { CreateTrailCommand, CreateTrailCommandInput, CreateTrailCommandOutput } from "./commands/CreateTrailCommand"; +import { + DeleteEventDataStoreCommand, + DeleteEventDataStoreCommandInput, + DeleteEventDataStoreCommandOutput, +} from "./commands/DeleteEventDataStoreCommand"; import { DeleteTrailCommand, DeleteTrailCommandInput, DeleteTrailCommandOutput } from "./commands/DeleteTrailCommand"; +import { + DescribeQueryCommand, + DescribeQueryCommandInput, + DescribeQueryCommandOutput, +} from "./commands/DescribeQueryCommand"; import { DescribeTrailsCommand, DescribeTrailsCommandInput, DescribeTrailsCommandOutput, } from "./commands/DescribeTrailsCommand"; +import { + GetEventDataStoreCommand, + GetEventDataStoreCommandInput, + GetEventDataStoreCommandOutput, +} from "./commands/GetEventDataStoreCommand"; import { GetEventSelectorsCommand, GetEventSelectorsCommandInput, @@ -19,17 +40,28 @@ import { GetInsightSelectorsCommandInput, GetInsightSelectorsCommandOutput, } from "./commands/GetInsightSelectorsCommand"; +import { + GetQueryResultsCommand, + GetQueryResultsCommandInput, + GetQueryResultsCommandOutput, +} from "./commands/GetQueryResultsCommand"; import { GetTrailCommand, GetTrailCommandInput, GetTrailCommandOutput } from "./commands/GetTrailCommand"; import { GetTrailStatusCommand, GetTrailStatusCommandInput, GetTrailStatusCommandOutput, } from "./commands/GetTrailStatusCommand"; +import { + ListEventDataStoresCommand, + ListEventDataStoresCommandInput, + ListEventDataStoresCommandOutput, +} from "./commands/ListEventDataStoresCommand"; import { ListPublicKeysCommand, ListPublicKeysCommandInput, ListPublicKeysCommandOutput, } from "./commands/ListPublicKeysCommand"; +import { ListQueriesCommand, ListQueriesCommandInput, ListQueriesCommandOutput } from "./commands/ListQueriesCommand"; import { ListTagsCommand, ListTagsCommandInput, ListTagsCommandOutput } from "./commands/ListTagsCommand"; import { ListTrailsCommand, ListTrailsCommandInput, ListTrailsCommandOutput } from "./commands/ListTrailsCommand"; import { @@ -48,12 +80,23 @@ import { PutInsightSelectorsCommandOutput, } from "./commands/PutInsightSelectorsCommand"; import { RemoveTagsCommand, RemoveTagsCommandInput, RemoveTagsCommandOutput } from "./commands/RemoveTagsCommand"; +import { + RestoreEventDataStoreCommand, + RestoreEventDataStoreCommandInput, + RestoreEventDataStoreCommandOutput, +} from "./commands/RestoreEventDataStoreCommand"; import { StartLoggingCommand, StartLoggingCommandInput, StartLoggingCommandOutput, 
} from "./commands/StartLoggingCommand"; +import { StartQueryCommand, StartQueryCommandInput, StartQueryCommandOutput } from "./commands/StartQueryCommand"; import { StopLoggingCommand, StopLoggingCommandInput, StopLoggingCommandOutput } from "./commands/StopLoggingCommand"; +import { + UpdateEventDataStoreCommand, + UpdateEventDataStoreCommandInput, + UpdateEventDataStoreCommandOutput, +} from "./commands/UpdateEventDataStoreCommand"; import { UpdateTrailCommand, UpdateTrailCommandInput, UpdateTrailCommandOutput } from "./commands/UpdateTrailCommand"; /** @@ -104,6 +147,66 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED, or FINISHED. You must specify an ARN value for EventDataStore. + * The ID of the query that you want to cancel is also required. When you run CancelQuery, the query status might + * show as CANCELLED even if the operation is not yet finished.
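A minimal usage sketch for this new operation, assuming the generated input members are EventDataStore (the event data store ARN) and QueryId (the query ID mentioned above); both values below are placeholders.

```ts
import { CloudTrailClient, CancelQueryCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" }); // example region

const response = await client.send(
  new CancelQueryCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE-f852-4e8f",
    QueryId: "EXAMPLE-query-id", // assumed member name for the query ID
  })
);
// The returned status can read CANCELLED before cancellation has actually completed.
```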

+ */ + public cancelQuery(args: CancelQueryCommandInput, options?: __HttpHandlerOptions): Promise; + public cancelQuery(args: CancelQueryCommandInput, cb: (err: any, data?: CancelQueryCommandOutput) => void): void; + public cancelQuery( + args: CancelQueryCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CancelQueryCommandOutput) => void + ): void; + public cancelQuery( + args: CancelQueryCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CancelQueryCommandOutput) => void), + cb?: (err: any, data?: CancelQueryCommandOutput) => void + ): Promise | void { + const command = new CancelQueryCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a new event data store.
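A hedged sketch of creating a store: none of these input members appear in this diff, so Name, RetentionPeriod, MultiRegionEnabled, and TerminationProtectionEnabled are assumptions based on the CloudTrail Lake API.

```ts
import { CloudTrailClient, CreateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

const response = await client.send(
  new CreateEventDataStoreCommand({
    Name: "management-events-store",    // assumed member names throughout
    RetentionPeriod: 90,                // days
    MultiRegionEnabled: true,
    TerminationProtectionEnabled: true, // must be disabled later before the store can be deleted
  })
);
```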

+ */ + public createEventDataStore( + args: CreateEventDataStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createEventDataStore( + args: CreateEventDataStoreCommandInput, + cb: (err: any, data?: CreateEventDataStoreCommandOutput) => void + ): void; + public createEventDataStore( + args: CreateEventDataStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateEventDataStoreCommandOutput) => void + ): void; + public createEventDataStore( + args: CreateEventDataStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateEventDataStoreCommandOutput) => void), + cb?: (err: any, data?: CreateEventDataStoreCommandOutput) => void + ): Promise | void { + const command = new CreateEventDataStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Creates a trail that specifies the settings for delivery of log data to an Amazon S3 bucket. *

@@ -131,6 +234,44 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Disables the event data store specified by EventDataStore, which accepts an event data store ARN. + * After you run DeleteEventDataStore, the event data store is automatically deleted after a wait period of + * seven days. TerminationProtectionEnabled must be set to False on the event data store; the + * operation fails if TerminationProtectionEnabled is True.

+ *

After you run DeleteEventDataStore on an event data store, you cannot run ListQueries, + * DescribeQuery, or GetQueryResults on queries that are using an event data store in a + * PENDING_DELETION state.
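Deletion needs only the EventDataStore member (the ARN, per the description above); a minimal sketch with a placeholder ARN:

```ts
import { CloudTrailClient, DeleteEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Termination protection must already be turned off on the store, or this call is rejected.
await client.send(
  new DeleteEventDataStoreCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE-f852-4e8f",
  })
);
// The store then sits in PENDING_DELETION for the seven-day wait period and can still be restored.
```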

+ */ + public deleteEventDataStore( + args: DeleteEventDataStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteEventDataStore( + args: DeleteEventDataStoreCommandInput, + cb: (err: any, data?: DeleteEventDataStoreCommandOutput) => void + ): void; + public deleteEventDataStore( + args: DeleteEventDataStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteEventDataStoreCommandOutput) => void + ): void; + public deleteEventDataStore( + args: DeleteEventDataStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteEventDataStoreCommandOutput) => void), + cb?: (err: any, data?: DeleteEventDataStoreCommandOutput) => void + ): Promise | void { + const command = new DeleteEventDataStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Deletes a trail. This operation must be called from the region in which the trail was * created. DeleteTrail cannot be called on the shadow trails (replicated trails @@ -159,6 +300,39 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query + * status. You must specify an ARN for EventDataStore, and a value for QueryID.
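A sketch of checking a query's progress; QueryId is the assumed member name behind the QueryID value mentioned above.

```ts
import { CloudTrailClient, DescribeQueryCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

const query = await client.send(
  new DescribeQueryCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE-f852-4e8f",
    QueryId: "EXAMPLE-query-id", // assumed member name
  })
);
// The response carries the query status plus run-time and events-scanned/matched statistics.
```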

+ */ + public describeQuery( + args: DescribeQueryCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeQuery( + args: DescribeQueryCommandInput, + cb: (err: any, data?: DescribeQueryCommandOutput) => void + ): void; + public describeQuery( + args: DescribeQueryCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeQueryCommandOutput) => void + ): void; + public describeQuery( + args: DescribeQueryCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeQueryCommandOutput) => void), + cb?: (err: any, data?: DescribeQueryCommandOutput) => void + ): Promise | void { + const command = new DescribeQueryCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Retrieves settings for one or more trails associated with the current region for your account.

*/ @@ -191,6 +365,38 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Returns information about an event data store specified as either an ARN or the ID portion of the ARN.
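Since either the ARN or its ID suffix is accepted, a sketch that passes just the ID portion (placeholder value):

```ts
import { CloudTrailClient, GetEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// EventDataStore accepts the full ARN or, as here, only the ID portion of the ARN.
const store = await client.send(new GetEventDataStoreCommand({ EventDataStore: "EXAMPLE-f852-4e8f" }));
```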

+ */ + public getEventDataStore( + args: GetEventDataStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getEventDataStore( + args: GetEventDataStoreCommandInput, + cb: (err: any, data?: GetEventDataStoreCommandOutput) => void + ): void; + public getEventDataStore( + args: GetEventDataStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetEventDataStoreCommandOutput) => void + ): void; + public getEventDataStore( + args: GetEventDataStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetEventDataStoreCommandOutput) => void), + cb?: (err: any, data?: GetEventDataStoreCommandOutput) => void + ): Promise | void { + const command = new GetEventDataStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Describes the settings for the event selectors that you configured for your trail. * The information returned for your event selectors includes the following:

@@ -277,6 +483,39 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Gets event data results of a query. You must specify the QueryID value returned by the StartQuery + * operation, and an ARN for EventDataStore.
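A paging sketch; the NextToken and QueryResultRows members are assumptions (they do not appear in this diff), while EventDataStore and the query ID follow the description above.

```ts
import { CloudTrailClient, GetQueryResultsCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});
const base = {
  EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE-f852-4e8f",
  QueryId: "EXAMPLE-query-id", // assumed member name
};

let nextToken: string | undefined;
do {
  const page = await client.send(new GetQueryResultsCommand({ ...base, NextToken: nextToken }));
  console.log(page.QueryResultRows); // assumed member holding the result rows
  nextToken = page.NextToken;        // assumed pagination member
} while (nextToken);
```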

+ */ + public getQueryResults( + args: GetQueryResultsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getQueryResults( + args: GetQueryResultsCommandInput, + cb: (err: any, data?: GetQueryResultsCommandOutput) => void + ): void; + public getQueryResults( + args: GetQueryResultsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetQueryResultsCommandOutput) => void + ): void; + public getQueryResults( + args: GetQueryResultsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetQueryResultsCommandOutput) => void), + cb?: (err: any, data?: GetQueryResultsCommandOutput) => void + ): Promise | void { + const command = new GetQueryResultsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Returns settings information for a specified trail.

*/ @@ -335,6 +574,38 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Returns information about all event data stores in the account, in the current region.
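A sketch of listing stores; the EventDataStores output member and its fields are assumptions, since only the command wiring appears in this diff.

```ts
import { CloudTrailClient, ListEventDataStoresCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

const { EventDataStores } = await client.send(new ListEventDataStoresCommand({}));
for (const store of EventDataStores ?? []) {
  console.log(store.EventDataStoreArn, store.Name); // assumed member names
}
```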

+ */ + public listEventDataStores( + args: ListEventDataStoresCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listEventDataStores( + args: ListEventDataStoresCommandInput, + cb: (err: any, data?: ListEventDataStoresCommandOutput) => void + ): void; + public listEventDataStores( + args: ListEventDataStoresCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListEventDataStoresCommandOutput) => void + ): void; + public listEventDataStores( + args: ListEventDataStoresCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListEventDataStoresCommandOutput) => void), + cb?: (err: any, data?: ListEventDataStoresCommandOutput) => void + ): Promise | void { + const command = new ListEventDataStoresCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Returns all public keys whose private keys were used to sign the digest files within the specified time range. The public key is needed to validate digest files that were signed with its corresponding private key.

* @@ -372,6 +643,36 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Returns a list of queries and query statuses for the past seven days. You must specify an ARN value for + * EventDataStore. Optionally, to shorten the list of results, you can specify a time range, + * formatted as timestamps, by adding StartTime and EndTime parameters, and a + * QueryStatus value. Valid values for QueryStatus include QUEUED, RUNNING, + * FINISHED, FAILED, or CANCELLED.
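A sketch that narrows the listing with the optional filters named above (StartTime, EndTime, QueryStatus); the Queries output member is an assumption.

```ts
import { CloudTrailClient, ListQueriesCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

const response = await client.send(
  new ListQueriesCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE-f852-4e8f",
    StartTime: new Date(Date.now() - 24 * 60 * 60 * 1000), // last 24 hours
    EndTime: new Date(),
    QueryStatus: "FINISHED",
  })
);
console.log(response.Queries); // assumed member listing query IDs, statuses, and creation times
```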

+ */ + public listQueries(args: ListQueriesCommandInput, options?: __HttpHandlerOptions): Promise; + public listQueries(args: ListQueriesCommandInput, cb: (err: any, data?: ListQueriesCommandOutput) => void): void; + public listQueries( + args: ListQueriesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListQueriesCommandOutput) => void + ): void; + public listQueries( + args: ListQueriesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListQueriesCommandOutput) => void), + cb?: (err: any, data?: ListQueriesCommandOutput) => void + ): Promise | void { + const command = new ListQueriesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Lists the tags for the trail in the current region.

*/ @@ -580,7 +881,7 @@ export class CloudTrail extends CloudTrailClient { *

Lets you enable Insights event logging by specifying the Insights * selectors that you want to enable on an existing trail. You also use * PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. - * The valid Insights event type in this release is ApiCallRateInsight.

+ * The valid Insights event types in this release are ApiErrorRateInsight and ApiCallRateInsight.
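With the second Insights type now documented, enabling both might look like the sketch below; TrailName, InsightSelectors, and InsightType are assumed member names (they are not shown in this diff).

```ts
import { CloudTrailClient, PutInsightSelectorsCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Pass an empty InsightSelectors list instead to turn Insights event logging off.
await client.send(
  new PutInsightSelectorsCommand({
    TrailName: "management-events-trail", // assumed member names
    InsightSelectors: [{ InsightType: "ApiCallRateInsight" }, { InsightType: "ApiErrorRateInsight" }],
  })
);
```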

*/ public putInsightSelectors( args: PutInsightSelectorsCommandInput, @@ -637,6 +938,40 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Restores a deleted event data store specified by EventDataStore, which accepts an event data store ARN. + * You can only restore a deleted event data store within the seven-day wait period after deletion. Restoring an event data store + * can take several minutes, depending on the size of the event data store.
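Restoration takes the same single EventDataStore member (the ARN); a sketch with a placeholder ARN:

```ts
import { CloudTrailClient, RestoreEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Only works while the deleted store is still inside its seven-day PENDING_DELETION window.
await client.send(
  new RestoreEventDataStoreCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE-f852-4e8f",
  })
);
```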

+ */ + public restoreEventDataStore( + args: RestoreEventDataStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public restoreEventDataStore( + args: RestoreEventDataStoreCommandInput, + cb: (err: any, data?: RestoreEventDataStoreCommandOutput) => void + ): void; + public restoreEventDataStore( + args: RestoreEventDataStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: RestoreEventDataStoreCommandOutput) => void + ): void; + public restoreEventDataStore( + args: RestoreEventDataStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: RestoreEventDataStoreCommandOutput) => void), + cb?: (err: any, data?: RestoreEventDataStoreCommandOutput) => void + ): Promise | void { + const command = new RestoreEventDataStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Starts the recording of Amazon Web Services API calls and log file delivery for a trail. For a trail that is enabled in all regions, this operation must be called from the region in which the trail was created. This operation cannot be called on the shadow trails (replicated trails in other regions) of a trail that is enabled in all regions.

*/ @@ -666,6 +1001,33 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Starts a CloudTrail Lake query. The required QueryStatement + * parameter provides your SQL query, enclosed in single quotation marks.
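A sketch of starting a query. QueryStatement comes from the description above; the SQL text itself and the QueryId output member are assumptions (CloudTrail Lake queries reference the event data store by its ID inside the FROM clause).

```ts
import { CloudTrailClient, StartQueryCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

const { QueryId } = await client.send(
  new StartQueryCommand({
    // Illustrative SQL; EXAMPLE-f852-4e8f stands in for the event data store ID.
    QueryStatement: "SELECT eventID, eventTime FROM EXAMPLE-f852-4e8f WHERE eventTime > '2021-11-01 00:00:00'",
  })
);
console.log(QueryId); // poll this ID with describeQuery / getQueryResults
```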

+ */ + public startQuery(args: StartQueryCommandInput, options?: __HttpHandlerOptions): Promise; + public startQuery(args: StartQueryCommandInput, cb: (err: any, data?: StartQueryCommandOutput) => void): void; + public startQuery( + args: StartQueryCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StartQueryCommandOutput) => void + ): void; + public startQuery( + args: StartQueryCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StartQueryCommandOutput) => void), + cb?: (err: any, data?: StartQueryCommandOutput) => void + ): Promise | void { + const command = new StartQueryCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Suspends the recording of Amazon Web Services API calls and log file delivery for the specified trail. * Under most circumstances, there is no need to use this action. You can update a trail @@ -698,6 +1060,43 @@ export class CloudTrail extends CloudTrailClient { } } + /** + *

Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. + * Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. + * RetentionPeriod is in days, and valid values are integers between 90 and 2555. + * By default, TerminationProtection is enabled. AdvancedEventSelectors includes or excludes management + * and data events in your event data store; for more information about AdvancedEventSelectors, see + * PutEventSelectorsRequest$AdvancedEventSelectors.
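A sketch that updates only the retention settings named above; TerminationProtectionEnabled is the assumed member name behind the TerminationProtection setting.

```ts
import { CloudTrailClient, UpdateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

await client.send(
  new UpdateEventDataStoreCommand({
    EventDataStore: "EXAMPLE-f852-4e8f", // ARN or the ID portion of the ARN
    RetentionPeriod: 365,                // days; valid values are 90-2555
    TerminationProtectionEnabled: false, // assumed member name
  })
);
```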

+ */ + public updateEventDataStore( + args: UpdateEventDataStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateEventDataStore( + args: UpdateEventDataStoreCommandInput, + cb: (err: any, data?: UpdateEventDataStoreCommandOutput) => void + ): void; + public updateEventDataStore( + args: UpdateEventDataStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateEventDataStoreCommandOutput) => void + ): void; + public updateEventDataStore( + args: UpdateEventDataStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateEventDataStoreCommandOutput) => void), + cb?: (err: any, data?: UpdateEventDataStoreCommandOutput) => void + ): Promise | void { + const command = new UpdateEventDataStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Updates trail settings that control what events you are logging, and how to handle log files. Changes to a trail do not require * stopping the CloudTrail service. Use this action to designate an existing bucket for log diff --git a/clients/client-cloudtrail/src/CloudTrailClient.ts b/clients/client-cloudtrail/src/CloudTrailClient.ts index b6aeadcddebc..9902558036dc 100644 --- a/clients/client-cloudtrail/src/CloudTrailClient.ts +++ b/clients/client-cloudtrail/src/CloudTrailClient.ts @@ -50,17 +50,34 @@ import { } from "@aws-sdk/types"; import { AddTagsCommandInput, AddTagsCommandOutput } from "./commands/AddTagsCommand"; +import { CancelQueryCommandInput, CancelQueryCommandOutput } from "./commands/CancelQueryCommand"; +import { + CreateEventDataStoreCommandInput, + CreateEventDataStoreCommandOutput, +} from "./commands/CreateEventDataStoreCommand"; import { CreateTrailCommandInput, CreateTrailCommandOutput } from "./commands/CreateTrailCommand"; +import { + DeleteEventDataStoreCommandInput, + DeleteEventDataStoreCommandOutput, +} from "./commands/DeleteEventDataStoreCommand"; import { DeleteTrailCommandInput, DeleteTrailCommandOutput } from "./commands/DeleteTrailCommand"; +import { DescribeQueryCommandInput, DescribeQueryCommandOutput } from "./commands/DescribeQueryCommand"; import { DescribeTrailsCommandInput, DescribeTrailsCommandOutput } from "./commands/DescribeTrailsCommand"; +import { GetEventDataStoreCommandInput, GetEventDataStoreCommandOutput } from "./commands/GetEventDataStoreCommand"; import { GetEventSelectorsCommandInput, GetEventSelectorsCommandOutput } from "./commands/GetEventSelectorsCommand"; import { GetInsightSelectorsCommandInput, GetInsightSelectorsCommandOutput, } from "./commands/GetInsightSelectorsCommand"; +import { GetQueryResultsCommandInput, GetQueryResultsCommandOutput } from "./commands/GetQueryResultsCommand"; import { GetTrailCommandInput, GetTrailCommandOutput } from "./commands/GetTrailCommand"; import { GetTrailStatusCommandInput, GetTrailStatusCommandOutput } from "./commands/GetTrailStatusCommand"; +import { + ListEventDataStoresCommandInput, + ListEventDataStoresCommandOutput, +} from "./commands/ListEventDataStoresCommand"; import { ListPublicKeysCommandInput, ListPublicKeysCommandOutput } from "./commands/ListPublicKeysCommand"; +import { ListQueriesCommandInput, ListQueriesCommandOutput } from "./commands/ListQueriesCommand"; import { ListTagsCommandInput, ListTagsCommandOutput } from "./commands/ListTagsCommand"; import { ListTrailsCommandInput, ListTrailsCommandOutput } from "./commands/ListTrailsCommand"; import { LookupEventsCommandInput, LookupEventsCommandOutput } from "./commands/LookupEventsCommand"; @@ -70,49 +87,80 @@ import { PutInsightSelectorsCommandOutput, } from "./commands/PutInsightSelectorsCommand"; import { RemoveTagsCommandInput, RemoveTagsCommandOutput } from "./commands/RemoveTagsCommand"; +import { + RestoreEventDataStoreCommandInput, + RestoreEventDataStoreCommandOutput, +} from "./commands/RestoreEventDataStoreCommand"; import { StartLoggingCommandInput, StartLoggingCommandOutput } from "./commands/StartLoggingCommand"; +import { StartQueryCommandInput, StartQueryCommandOutput } from "./commands/StartQueryCommand"; import { StopLoggingCommandInput, StopLoggingCommandOutput } from "./commands/StopLoggingCommand"; +import { + UpdateEventDataStoreCommandInput, + UpdateEventDataStoreCommandOutput, +} from "./commands/UpdateEventDataStoreCommand"; import { UpdateTrailCommandInput, UpdateTrailCommandOutput } from 
"./commands/UpdateTrailCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = | AddTagsCommandInput + | CancelQueryCommandInput + | CreateEventDataStoreCommandInput | CreateTrailCommandInput + | DeleteEventDataStoreCommandInput | DeleteTrailCommandInput + | DescribeQueryCommandInput | DescribeTrailsCommandInput + | GetEventDataStoreCommandInput | GetEventSelectorsCommandInput | GetInsightSelectorsCommandInput + | GetQueryResultsCommandInput | GetTrailCommandInput | GetTrailStatusCommandInput + | ListEventDataStoresCommandInput | ListPublicKeysCommandInput + | ListQueriesCommandInput | ListTagsCommandInput | ListTrailsCommandInput | LookupEventsCommandInput | PutEventSelectorsCommandInput | PutInsightSelectorsCommandInput | RemoveTagsCommandInput + | RestoreEventDataStoreCommandInput | StartLoggingCommandInput + | StartQueryCommandInput | StopLoggingCommandInput + | UpdateEventDataStoreCommandInput | UpdateTrailCommandInput; export type ServiceOutputTypes = | AddTagsCommandOutput + | CancelQueryCommandOutput + | CreateEventDataStoreCommandOutput | CreateTrailCommandOutput + | DeleteEventDataStoreCommandOutput | DeleteTrailCommandOutput + | DescribeQueryCommandOutput | DescribeTrailsCommandOutput + | GetEventDataStoreCommandOutput | GetEventSelectorsCommandOutput | GetInsightSelectorsCommandOutput + | GetQueryResultsCommandOutput | GetTrailCommandOutput | GetTrailStatusCommandOutput + | ListEventDataStoresCommandOutput | ListPublicKeysCommandOutput + | ListQueriesCommandOutput | ListTagsCommandOutput | ListTrailsCommandOutput | LookupEventsCommandOutput | PutEventSelectorsCommandOutput | PutInsightSelectorsCommandOutput | RemoveTagsCommandOutput + | RestoreEventDataStoreCommandOutput | StartLoggingCommandOutput + | StartQueryCommandOutput | StopLoggingCommandOutput + | UpdateEventDataStoreCommandOutput | UpdateTrailCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { diff --git a/clients/client-cloudtrail/src/commands/CancelQueryCommand.ts b/clients/client-cloudtrail/src/commands/CancelQueryCommand.ts new file mode 100644 index 000000000000..767926d2bd96 --- /dev/null +++ b/clients/client-cloudtrail/src/commands/CancelQueryCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { CancelQueryRequest, CancelQueryResponse } from "../models/models_0"; +import { + deserializeAws_json1_1CancelQueryCommand, + serializeAws_json1_1CancelQueryCommand, +} from "../protocols/Aws_json1_1"; + +export interface CancelQueryCommandInput extends CancelQueryRequest {} +export interface CancelQueryCommandOutput extends CancelQueryResponse, __MetadataBearer {} + +/** + *

Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED, or FINISHED. You must specify an ARN value for EventDataStore. + * The ID of the query that you want to cancel is also required. When you run CancelQuery, the query status might + * show as CANCELLED even if the operation is not yet finished.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, CancelQueryCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, CancelQueryCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new CancelQueryCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CancelQueryCommandInput} for command's `input` shape. + * @see {@link CancelQueryCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class CancelQueryCommand extends $Command< + CancelQueryCommandInput, + CancelQueryCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CancelQueryCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "CancelQueryCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CancelQueryRequest.filterSensitiveLog, + outputFilterSensitiveLog: CancelQueryResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CancelQueryCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1CancelQueryCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1CancelQueryCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/CreateEventDataStoreCommand.ts b/clients/client-cloudtrail/src/commands/CreateEventDataStoreCommand.ts new file mode 100644 index 000000000000..6ac3106c4682 --- /dev/null +++ b/clients/client-cloudtrail/src/commands/CreateEventDataStoreCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { CreateEventDataStoreRequest, CreateEventDataStoreResponse } from "../models/models_0"; +import { + deserializeAws_json1_1CreateEventDataStoreCommand, + serializeAws_json1_1CreateEventDataStoreCommand, +} from 
"../protocols/Aws_json1_1"; + +export interface CreateEventDataStoreCommandInput extends CreateEventDataStoreRequest {} +export interface CreateEventDataStoreCommandOutput extends CreateEventDataStoreResponse, __MetadataBearer {} + +/** + *

Creates a new event data store.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, CreateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, CreateEventDataStoreCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new CreateEventDataStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateEventDataStoreCommandInput} for command's `input` shape. + * @see {@link CreateEventDataStoreCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class CreateEventDataStoreCommand extends $Command< + CreateEventDataStoreCommandInput, + CreateEventDataStoreCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateEventDataStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "CreateEventDataStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateEventDataStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateEventDataStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateEventDataStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1CreateEventDataStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1CreateEventDataStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/DeleteEventDataStoreCommand.ts b/clients/client-cloudtrail/src/commands/DeleteEventDataStoreCommand.ts new file mode 100644 index 000000000000..130a8e84969c --- /dev/null +++ b/clients/client-cloudtrail/src/commands/DeleteEventDataStoreCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { DeleteEventDataStoreRequest, DeleteEventDataStoreResponse } from 
"../models/models_0"; +import { + deserializeAws_json1_1DeleteEventDataStoreCommand, + serializeAws_json1_1DeleteEventDataStoreCommand, +} from "../protocols/Aws_json1_1"; + +export interface DeleteEventDataStoreCommandInput extends DeleteEventDataStoreRequest {} +export interface DeleteEventDataStoreCommandOutput extends DeleteEventDataStoreResponse, __MetadataBearer {} + +/** + *

Disables the event data store specified by EventDataStore, which accepts an event data store ARN. + * After you run DeleteEventDataStore, the event data store is automatically deleted after a wait period of + * seven days. TerminationProtectionEnabled must be set to False on the event data store; the + * operation fails if TerminationProtectionEnabled is True.

+ *

After you run DeleteEventDataStore on an event data store, you cannot run ListQueries, + * DescribeQuery, or GetQueryResults on queries that are using an event data store in a + * PENDING_DELETION state.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, DeleteEventDataStoreCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, DeleteEventDataStoreCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new DeleteEventDataStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteEventDataStoreCommandInput} for command's `input` shape. + * @see {@link DeleteEventDataStoreCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class DeleteEventDataStoreCommand extends $Command< + DeleteEventDataStoreCommandInput, + DeleteEventDataStoreCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteEventDataStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "DeleteEventDataStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteEventDataStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteEventDataStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteEventDataStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DeleteEventDataStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1DeleteEventDataStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/DescribeQueryCommand.ts b/clients/client-cloudtrail/src/commands/DescribeQueryCommand.ts new file mode 100644 index 000000000000..94f63f4bf25d --- /dev/null +++ b/clients/client-cloudtrail/src/commands/DescribeQueryCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { DescribeQueryRequest, DescribeQueryResponse } from "../models/models_0"; +import { + 
deserializeAws_json1_1DescribeQueryCommand, + serializeAws_json1_1DescribeQueryCommand, +} from "../protocols/Aws_json1_1"; + +export interface DescribeQueryCommandInput extends DescribeQueryRequest {} +export interface DescribeQueryCommandOutput extends DescribeQueryResponse, __MetadataBearer {} + +/** + *

Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query + * status. You must specify an ARN for EventDataStore, and a value for QueryID.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, DescribeQueryCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, DescribeQueryCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new DescribeQueryCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeQueryCommandInput} for command's `input` shape. + * @see {@link DescribeQueryCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class DescribeQueryCommand extends $Command< + DescribeQueryCommandInput, + DescribeQueryCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeQueryCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "DescribeQueryCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeQueryRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeQueryResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeQueryCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DescribeQueryCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1DescribeQueryCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/GetEventDataStoreCommand.ts b/clients/client-cloudtrail/src/commands/GetEventDataStoreCommand.ts new file mode 100644 index 000000000000..720df042896c --- /dev/null +++ b/clients/client-cloudtrail/src/commands/GetEventDataStoreCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { GetEventDataStoreRequest, GetEventDataStoreResponse } from "../models/models_0"; +import { + deserializeAws_json1_1GetEventDataStoreCommand, + serializeAws_json1_1GetEventDataStoreCommand, +} from 
"../protocols/Aws_json1_1"; + +export interface GetEventDataStoreCommandInput extends GetEventDataStoreRequest {} +export interface GetEventDataStoreCommandOutput extends GetEventDataStoreResponse, __MetadataBearer {} + +/** + *

Returns information about an event data store specified as either an ARN or the ID portion of the ARN.

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, GetEventDataStoreCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, GetEventDataStoreCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new GetEventDataStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetEventDataStoreCommandInput} for command's `input` shape. + * @see {@link GetEventDataStoreCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class GetEventDataStoreCommand extends $Command< + GetEventDataStoreCommandInput, + GetEventDataStoreCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetEventDataStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "GetEventDataStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetEventDataStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetEventDataStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetEventDataStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1GetEventDataStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1GetEventDataStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/GetQueryResultsCommand.ts b/clients/client-cloudtrail/src/commands/GetQueryResultsCommand.ts new file mode 100644 index 000000000000..6b90ea1e0175 --- /dev/null +++ b/clients/client-cloudtrail/src/commands/GetQueryResultsCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { GetQueryResultsRequest, GetQueryResultsResponse } from "../models/models_0"; +import { + deserializeAws_json1_1GetQueryResultsCommand, + 
serializeAws_json1_1GetQueryResultsCommand, +} from "../protocols/Aws_json1_1"; + +export interface GetQueryResultsCommandInput extends GetQueryResultsRequest {} +export interface GetQueryResultsCommandOutput extends GetQueryResultsResponse, __MetadataBearer {} + +/** + *

Gets event data results of a query. You must specify the QueryId value returned by the StartQuery + * operation, and an ARN for EventDataStore.
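A sketch of how the command might be called (illustrative only): the ARN and query ID are placeholders, and pagination of large result sets via a `NextToken` field is an assumption not shown in this hunk.

```ts
import { CloudTrailClient, GetQueryResultsCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
const response = await client.send(
  new GetQueryResultsCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE",
    QueryId: "EXAMPLE-query-id", // returned by a prior StartQuery call
  })
);
console.log(response); // result rows and query status; large result sets are assumed to be paged
```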

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, GetQueryResultsCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, GetQueryResultsCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new GetQueryResultsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetQueryResultsCommandInput} for command's `input` shape. + * @see {@link GetQueryResultsCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class GetQueryResultsCommand extends $Command< + GetQueryResultsCommandInput, + GetQueryResultsCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetQueryResultsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "GetQueryResultsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetQueryResultsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetQueryResultsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetQueryResultsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1GetQueryResultsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1GetQueryResultsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/ListEventDataStoresCommand.ts b/clients/client-cloudtrail/src/commands/ListEventDataStoresCommand.ts new file mode 100644 index 000000000000..8d0228f12a79 --- /dev/null +++ b/clients/client-cloudtrail/src/commands/ListEventDataStoresCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { ListEventDataStoresRequest, ListEventDataStoresResponse } from "../models/models_0"; +import { + deserializeAws_json1_1ListEventDataStoresCommand, + 
serializeAws_json1_1ListEventDataStoresCommand, +} from "../protocols/Aws_json1_1"; + +export interface ListEventDataStoresCommandInput extends ListEventDataStoresRequest {} +export interface ListEventDataStoresCommandOutput extends ListEventDataStoresResponse, __MetadataBearer {} + +/** + *

Returns information about all event data stores in the account, in the current region.
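A minimal listing sketch (placeholder region; the `EventDataStores` response field name is an assumption, since the response shape is not shown in this hunk):

```ts
import { CloudTrailClient, ListEventDataStoresCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
const response = await client.send(new ListEventDataStoresCommand({}));
console.log(response.EventDataStores); // assumed response field: one summary per store in this region
```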

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, ListEventDataStoresCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, ListEventDataStoresCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new ListEventDataStoresCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListEventDataStoresCommandInput} for command's `input` shape. + * @see {@link ListEventDataStoresCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class ListEventDataStoresCommand extends $Command< + ListEventDataStoresCommandInput, + ListEventDataStoresCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListEventDataStoresCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "ListEventDataStoresCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListEventDataStoresRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListEventDataStoresResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListEventDataStoresCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1ListEventDataStoresCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1ListEventDataStoresCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/ListQueriesCommand.ts b/clients/client-cloudtrail/src/commands/ListQueriesCommand.ts new file mode 100644 index 000000000000..7d3ecda1b295 --- /dev/null +++ b/clients/client-cloudtrail/src/commands/ListQueriesCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { ListQueriesRequest, ListQueriesResponse } from "../models/models_0"; +import { + deserializeAws_json1_1ListQueriesCommand, + 
serializeAws_json1_1ListQueriesCommand, +} from "../protocols/Aws_json1_1"; + +export interface ListQueriesCommandInput extends ListQueriesRequest {} +export interface ListQueriesCommandOutput extends ListQueriesResponse, __MetadataBearer {} + +/** + *

Returns a list of queries and query statuses for the past seven days. You must specify an ARN value for + * EventDataStore. Optionally, to shorten the list of results, you can specify a time range, + * formatted as timestamps, by adding StartTime and EndTime parameters, and a + * QueryStatus value. Valid values for QueryStatus are QUEUED, RUNNING, + * FINISHED, FAILED, or CANCELLED.
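A sketch of a filtered call, using the parameters named above (ARN and time range are placeholders; the `Queries` response field is an assumption):

```ts
import { CloudTrailClient, ListQueriesCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
const response = await client.send(
  new ListQueriesCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE",
    StartTime: new Date(Date.now() - 24 * 60 * 60 * 1000), // optional: last 24 hours
    EndTime: new Date(),
    QueryStatus: "FINISHED", // optional: one of the values listed above
  })
);
console.log(response.Queries); // assumed response field with query IDs and statuses
```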

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, ListQueriesCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, ListQueriesCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new ListQueriesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListQueriesCommandInput} for command's `input` shape. + * @see {@link ListQueriesCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class ListQueriesCommand extends $Command< + ListQueriesCommandInput, + ListQueriesCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListQueriesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "ListQueriesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListQueriesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListQueriesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListQueriesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1ListQueriesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1ListQueriesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/PutInsightSelectorsCommand.ts b/clients/client-cloudtrail/src/commands/PutInsightSelectorsCommand.ts index d394230801ef..15ff6927a9ae 100644 --- a/clients/client-cloudtrail/src/commands/PutInsightSelectorsCommand.ts +++ b/clients/client-cloudtrail/src/commands/PutInsightSelectorsCommand.ts @@ -25,7 +25,7 @@ export interface PutInsightSelectorsCommandOutput extends PutInsightSelectorsRes *

Lets you enable Insights event logging by specifying the Insights * selectors that you want to enable on an existing trail. You also use * PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. - * The valid Insights event type in this release is ApiCallRateInsight.

+ * The valid Insights event types in this release are ApiErrorRateInsight and ApiCallRateInsight.
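A sketch showing both newly documented insight types (trail name is a placeholder; the `TrailName` and `InsightSelectors` field names follow the existing PutInsightSelectors request shape):

```ts
import { CloudTrailClient, PutInsightSelectorsCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });

// Enable both Insights event types on an existing trail.
await client.send(
  new PutInsightSelectorsCommand({
    TrailName: "my-trail",
    InsightSelectors: [{ InsightType: "ApiCallRateInsight" }, { InsightType: "ApiErrorRateInsight" }],
  })
);

// Passing an empty list turns Insights event logging off again.
await client.send(new PutInsightSelectorsCommand({ TrailName: "my-trail", InsightSelectors: [] }));
```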

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-cloudtrail/src/commands/RestoreEventDataStoreCommand.ts b/clients/client-cloudtrail/src/commands/RestoreEventDataStoreCommand.ts new file mode 100644 index 000000000000..a654733fb9a7 --- /dev/null +++ b/clients/client-cloudtrail/src/commands/RestoreEventDataStoreCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { RestoreEventDataStoreRequest, RestoreEventDataStoreResponse } from "../models/models_0"; +import { + deserializeAws_json1_1RestoreEventDataStoreCommand, + serializeAws_json1_1RestoreEventDataStoreCommand, +} from "../protocols/Aws_json1_1"; + +export interface RestoreEventDataStoreCommandInput extends RestoreEventDataStoreRequest {} +export interface RestoreEventDataStoreCommandOutput extends RestoreEventDataStoreResponse, __MetadataBearer {} + +/** + *

Restores a deleted event data store specified by EventDataStore, which accepts an event data store ARN. + * You can only restore a deleted event data store within the seven-day wait period after deletion. Restoring an event data store + * can take several minutes, depending on the size of the event data store.
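A minimal restore sketch (ARN is a placeholder; the shape of the response is an assumption, since it is not shown in this hunk):

```ts
import { CloudTrailClient, RestoreEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
const response = await client.send(
  new RestoreEventDataStoreCommand({
    // Must refer to a store deleted within the last seven days; the ARN is a placeholder.
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE",
  })
);
console.log(response); // assumed to echo back the store's settings and current status
```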

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, RestoreEventDataStoreCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, RestoreEventDataStoreCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new RestoreEventDataStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link RestoreEventDataStoreCommandInput} for command's `input` shape. + * @see {@link RestoreEventDataStoreCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class RestoreEventDataStoreCommand extends $Command< + RestoreEventDataStoreCommandInput, + RestoreEventDataStoreCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: RestoreEventDataStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "RestoreEventDataStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: RestoreEventDataStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: RestoreEventDataStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: RestoreEventDataStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1RestoreEventDataStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1RestoreEventDataStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/StartQueryCommand.ts b/clients/client-cloudtrail/src/commands/StartQueryCommand.ts new file mode 100644 index 000000000000..6bc35e5df5cd --- /dev/null +++ b/clients/client-cloudtrail/src/commands/StartQueryCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { StartQueryRequest, StartQueryResponse } from "../models/models_0"; +import { + 
deserializeAws_json1_1StartQueryCommand, + serializeAws_json1_1StartQueryCommand, +} from "../protocols/Aws_json1_1"; + +export interface StartQueryCommandInput extends StartQueryRequest {} +export interface StartQueryCommandOutput extends StartQueryResponse, __MetadataBearer {} + +/** + *

Starts a CloudTrail Lake query. The required QueryStatement + * parameter provides your SQL query, enclosed in single quotation marks.
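A sketch of starting a query (the SQL statement and event data store ID are illustrative only; `QueryId` in the response is implied by the CancelQueryRequest documentation later in this diff):

```ts
import { CloudTrailClient, StartQueryCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });

// The SQL statement and event data store ID are illustrative only.
const { QueryId } = await client.send(
  new StartQueryCommand({
    QueryStatement:
      "SELECT eventID, eventTime FROM EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE WHERE eventTime > '2021-12-01 00:00:00'",
  })
);
console.log(QueryId); // pass this ID to DescribeQuery or GetQueryResults
```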

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, StartQueryCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, StartQueryCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new StartQueryCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartQueryCommandInput} for command's `input` shape. + * @see {@link StartQueryCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class StartQueryCommand extends $Command< + StartQueryCommandInput, + StartQueryCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartQueryCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "StartQueryCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StartQueryRequest.filterSensitiveLog, + outputFilterSensitiveLog: StartQueryResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StartQueryCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1StartQueryCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1StartQueryCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/UpdateEventDataStoreCommand.ts b/clients/client-cloudtrail/src/commands/UpdateEventDataStoreCommand.ts new file mode 100644 index 000000000000..3c5dd2c73550 --- /dev/null +++ b/clients/client-cloudtrail/src/commands/UpdateEventDataStoreCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CloudTrailClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudTrailClient"; +import { UpdateEventDataStoreRequest, UpdateEventDataStoreResponse } from "../models/models_0"; +import { + deserializeAws_json1_1UpdateEventDataStoreCommand, + serializeAws_json1_1UpdateEventDataStoreCommand, +} from 
"../protocols/Aws_json1_1"; + +export interface UpdateEventDataStoreCommandInput extends UpdateEventDataStoreRequest {} +export interface UpdateEventDataStoreCommandOutput extends UpdateEventDataStoreResponse, __MetadataBearer {} + +/** + *

Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. + * Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. + * RetentionPeriod is in days, and valid values are integers between 90 and 2555. + * By default, TerminationProtection is enabled. AdvancedEventSelectors includes or excludes management + * and data events in your event data store; for more information about AdvancedEventSelectors, see + * PutEventSelectorsRequest$AdvancedEventSelectors.
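A sketch of an update call that supplies the required identifier plus one optional setting, as the paragraph above requires (ARN is a placeholder):

```ts
import { CloudTrailClient, UpdateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
await client.send(
  new UpdateEventDataStoreCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE",
    RetentionPeriod: 90, // days; valid values are 90 through 2555
  })
);
```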

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { CloudTrailClient, UpdateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail"; // ES Modules import + * // const { CloudTrailClient, UpdateEventDataStoreCommand } = require("@aws-sdk/client-cloudtrail"); // CommonJS import + * const client = new CloudTrailClient(config); + * const command = new UpdateEventDataStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateEventDataStoreCommandInput} for command's `input` shape. + * @see {@link UpdateEventDataStoreCommandOutput} for command's `response` shape. + * @see {@link CloudTrailClientResolvedConfig | config} for CloudTrailClient's `config` shape. + * + */ +export class UpdateEventDataStoreCommand extends $Command< + UpdateEventDataStoreCommandInput, + UpdateEventDataStoreCommandOutput, + CloudTrailClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateEventDataStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: CloudTrailClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "CloudTrailClient"; + const commandName = "UpdateEventDataStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateEventDataStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateEventDataStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateEventDataStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1UpdateEventDataStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1UpdateEventDataStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-cloudtrail/src/commands/index.ts b/clients/client-cloudtrail/src/commands/index.ts index c3d219d3cbac..413249ccb84c 100644 --- a/clients/client-cloudtrail/src/commands/index.ts +++ b/clients/client-cloudtrail/src/commands/index.ts @@ -1,18 +1,29 @@ export * from "./AddTagsCommand"; +export * from "./CancelQueryCommand"; +export * from "./CreateEventDataStoreCommand"; export * from "./CreateTrailCommand"; +export * from "./DeleteEventDataStoreCommand"; export * from "./DeleteTrailCommand"; +export * from "./DescribeQueryCommand"; export * from "./DescribeTrailsCommand"; +export * from "./GetEventDataStoreCommand"; export * from "./GetEventSelectorsCommand"; export * from "./GetInsightSelectorsCommand"; +export * from "./GetQueryResultsCommand"; export * from "./GetTrailCommand"; export * from "./GetTrailStatusCommand"; +export * from "./ListEventDataStoresCommand"; export * from "./ListPublicKeysCommand"; +export * from "./ListQueriesCommand"; 
export * from "./ListTagsCommand"; export * from "./ListTrailsCommand"; export * from "./LookupEventsCommand"; export * from "./PutEventSelectorsCommand"; export * from "./PutInsightSelectorsCommand"; export * from "./RemoveTagsCommand"; +export * from "./RestoreEventDataStoreCommand"; export * from "./StartLoggingCommand"; +export * from "./StartQueryCommand"; export * from "./StopLoggingCommand"; +export * from "./UpdateEventDataStoreCommand"; export * from "./UpdateTrailCommand"; diff --git a/clients/client-cloudtrail/src/models/models_0.ts b/clients/client-cloudtrail/src/models/models_0.ts index d6614e414b68..3b764aa497c5 100644 --- a/clients/client-cloudtrail/src/models/models_0.ts +++ b/clients/client-cloudtrail/src/models/models_0.ts @@ -40,7 +40,7 @@ export interface AddTagsRequest { /** *

Contains a list of tags, up to a limit of 50.
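Because this hunk makes TagsList required (`Tag[] | undefined`), omitting it is now a compile-time error. A sketch of a valid call; the `ResourceId` value is assumed to be the ARN of the trail (or event data store) to tag.

```ts
import { AddTagsCommand, CloudTrailClient } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
await client.send(
  new AddTagsCommand({
    // ResourceId is assumed to be the ARN of the trail (or event data store) to tag.
    ResourceId: "arn:aws:cloudtrail:us-east-1:123456789012:trail/my-trail",
    TagsList: [{ Key: "team", Value: "security" }], // now required by the type above
  })
);
```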

*/ - TagsList?: Tag[]; + TagsList: Tag[] | undefined; } export namespace AddTagsRequest { @@ -90,6 +90,71 @@ export namespace CloudTrailARNInvalidException { }); } +/** + *

This exception is thrown when the specified resource is not ready for an operation. + * This can occur when you try to run an operation on a trail before CloudTrail has time to fully load the trail. + * If this exception occurs, wait a few minutes, and then try the operation again.
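A retry sketch following the guidance above (the operation, retry count, and delay are illustrative choices, not prescribed by this change):

```ts
import { CloudTrailClient, DeleteTrailCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));

// Retry a few times when CloudTrail reports that the trail is not ready yet.
for (let attempt = 0; attempt < 3; attempt++) {
  try {
    await client.send(new DeleteTrailCommand({ Name: "my-trail" }));
    break;
  } catch (err) {
    if ((err as Error).name !== "ConflictException" || attempt === 2) throw err;
    await sleep(60_000); // wait a bit, as the exception documentation suggests
  }
}
```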

+ */ +export interface ConflictException extends __SmithyException, $MetadataBearer { + name: "ConflictException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace ConflictException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConflictException): any => ({ + ...obj, + }); +} + +/** + *

The specified event data store was not found.

+ */ +export interface EventDataStoreNotFoundException extends __SmithyException, $MetadataBearer { + name: "EventDataStoreNotFoundException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace EventDataStoreNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EventDataStoreNotFoundException): any => ({ + ...obj, + }); +} + +/** + *

The event data store against which you ran your query is inactive.

+ */ +export interface InactiveEventDataStoreException extends __SmithyException, $MetadataBearer { + name: "InactiveEventDataStoreException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace InactiveEventDataStoreException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InactiveEventDataStoreException): any => ({ + ...obj, + }); +} + /** *

This exception is thrown when the specified tag key or values are not valid. * It can also occur if there are duplicate tags or too many tags on the resource.

@@ -604,13 +669,63 @@ export namespace AdvancedEventSelector { }); } +export interface CancelQueryRequest { + /** + *

The ARN (or the ID suffix of the ARN) of an event data store on which the specified query is running.

+ */ + EventDataStore: string | undefined; + + /** + *

The ID of the query that you want to cancel. The QueryId comes from the response of a StartQuery + * operation.

+ */ + QueryId: string | undefined; +} + +export namespace CancelQueryRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CancelQueryRequest): any => ({ + ...obj, + }); +} + +export enum QueryStatus { + CANCELLED = "CANCELLED", + FAILED = "FAILED", + FINISHED = "FINISHED", + QUEUED = "QUEUED", + RUNNING = "RUNNING", +} + +export interface CancelQueryResponse { + /** + *

The ID of the canceled query.

+ */ + QueryId: string | undefined; + + /** + *

Shows the status of a query after a CancelQuery request. Typically, the values shown are either + * RUNNING or CANCELLED.
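A cancellation sketch using the two required request fields documented above (both values are placeholders):

```ts
import { CancelQueryCommand, CloudTrailClient } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
const { QueryId, QueryStatus } = await client.send(
  new CancelQueryCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:123456789012:eventdatastore/EXAMPLE",
    QueryId: "EXAMPLE-query-id",
  })
);
console.log(QueryId, QueryStatus); // typically RUNNING or CANCELLED right after the request
```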

+ */ + QueryStatus: QueryStatus | string | undefined; +} + +export namespace CancelQueryResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CancelQueryResponse): any => ({ + ...obj, + }); +} + /** - *

This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, - * see Enabling Trusted Access with Other Amazon Web Services Services - * and Prepare For Creating a Trail For Your Organization.

+ *

The specified event data store ARN is not valid or does not map to an event data store in your account.

*/ -export interface CloudTrailAccessNotEnabledException extends __SmithyException, $MetadataBearer { - name: "CloudTrailAccessNotEnabledException"; +export interface EventDataStoreARNInvalidException extends __SmithyException, $MetadataBearer { + name: "EventDataStoreARNInvalidException"; $fault: "client"; /** *

Brief description of the exception returned by the request.

@@ -618,22 +733,21 @@ export interface CloudTrailAccessNotEnabledException extends __SmithyException, Message?: string; } -export namespace CloudTrailAccessNotEnabledException { +export namespace EventDataStoreARNInvalidException { /** * @internal */ - export const filterSensitiveLog = (obj: CloudTrailAccessNotEnabledException): any => ({ + export const filterSensitiveLog = (obj: EventDataStoreARNInvalidException): any => ({ ...obj, }); } /** - *

This exception is thrown when a call results in the InvalidClientTokenId error code. - * This can occur when you are creating or updating a trail to send notifications to an Amazon SNS topic that - * is in a suspended Amazon Web Services account.

+ *

The specified query cannot be canceled because it is in the FINISHED, FAILED, or + * CANCELLED state.

*/ -export interface CloudTrailInvalidClientTokenIdException extends __SmithyException, $MetadataBearer { - name: "CloudTrailInvalidClientTokenIdException"; +export interface InactiveQueryException extends __SmithyException, $MetadataBearer { + name: "InactiveQueryException"; $fault: "client"; /** *

Brief description of the exception returned by the request.

@@ -641,20 +755,20 @@ export interface CloudTrailInvalidClientTokenIdException extends __SmithyExcepti Message?: string; } -export namespace CloudTrailInvalidClientTokenIdException { +export namespace InactiveQueryException { /** * @internal */ - export const filterSensitiveLog = (obj: CloudTrailInvalidClientTokenIdException): any => ({ + export const filterSensitiveLog = (obj: InactiveQueryException): any => ({ ...obj, }); } /** - *

Cannot set a CloudWatch Logs delivery for this region.

+ *

The request includes a parameter that is not valid.

*/ -export interface CloudWatchLogsDeliveryUnavailableException extends __SmithyException, $MetadataBearer { - name: "CloudWatchLogsDeliveryUnavailableException"; +export interface InvalidParameterException extends __SmithyException, $MetadataBearer { + name: "InvalidParameterException"; $fault: "client"; /** *

Brief description of the exception returned by the request.

@@ -662,122 +776,94 @@ export interface CloudWatchLogsDeliveryUnavailableException extends __SmithyExce Message?: string; } -export namespace CloudWatchLogsDeliveryUnavailableException { +export namespace InvalidParameterException { /** * @internal */ - export const filterSensitiveLog = (obj: CloudWatchLogsDeliveryUnavailableException): any => ({ + export const filterSensitiveLog = (obj: InvalidParameterException): any => ({ ...obj, }); } /** - *

Specifies the settings for each trail.

+ *

The query ID does not exist or does not map to a query.

*/ -export interface CreateTrailRequest { - /** - *

Specifies the name of the trail. The name must meet the following requirements:

- *
    - *
  • - *

    Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)

    - *
  • - *
  • - *

    Start with a letter or number, and end with a letter or number

    - *
  • - *
  • - *

    Be between 3 and 128 characters

    - *
  • - *
  • - *

    Have no adjacent periods, underscores or dashes. Names like my-_namespace - * and my--namespace are not valid.

    - *
  • - *
  • - *

    Not be in IP address format (for example, 192.168.5.4)

    - *
  • - *
- */ - Name: string | undefined; - +export interface QueryIdNotFoundException extends __SmithyException, $MetadataBearer { + name: "QueryIdNotFoundException"; + $fault: "client"; /** - *

Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

+ *

Brief description of the exception returned by the request.

*/ - S3BucketName: string | undefined; + Message?: string; +} +export namespace QueryIdNotFoundException { /** - *

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated - * for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

+ * @internal */ - S3KeyPrefix?: string; + export const filterSensitiveLog = (obj: QueryIdNotFoundException): any => ({ + ...obj, + }); +} +/** + *

This exception is thrown when trusted access has not been enabled between CloudTrail and Organizations. For more information, + * see Enabling Trusted Access with Other Amazon Web Services Services + * and Prepare For Creating a Trail For Your Organization.

+ */ +export interface CloudTrailAccessNotEnabledException extends __SmithyException, $MetadataBearer { + name: "CloudTrailAccessNotEnabledException"; + $fault: "client"; /** - *

Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.

+ *

Brief description of the exception returned by the request.

*/ - SnsTopicName?: string; + Message?: string; +} +export namespace CloudTrailAccessNotEnabledException { /** - *

Specifies whether the trail is publishing events from global services such as IAM to the log files.

+ * @internal */ - IncludeGlobalServiceEvents?: boolean; + export const filterSensitiveLog = (obj: CloudTrailAccessNotEnabledException): any => ({ + ...obj, + }); +} +export interface CreateEventDataStoreRequest { /** - *

Specifies whether the trail is created in the current region or in all regions. The default is false, which creates a trail only in the region where you are signed in. As a best practice, consider - * creating trails that log events in all regions.

+ *

The name of the event data store.

*/ - IsMultiRegionTrail?: boolean; + Name: string | undefined; /** - *

Specifies whether log file integrity validation is enabled. The default is false.

- * - *

When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail does - * not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. - * For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable - * it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on - * January 10. The same applies whenever you stop CloudTrail logging or delete a trail.

- *
+ *

The advanced event selectors to use to select the events for the data store. For more information about how to use advanced event + * selectors, see Log events by using advanced event selectors in the CloudTrail + * User Guide.

*/ - EnableLogFileValidation?: boolean; + AdvancedEventSelectors?: AdvancedEventSelector[]; /** - *

Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group - * to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.

+ *

Specifies whether the event data store includes events from all regions, or only from the region in which the event data store + * is created.

*/ - CloudWatchLogsLogGroupArn?: string; + MultiRegionEnabled?: boolean; /** - *

Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

+ *

Specifies whether an event data store collects events logged for an organization in Organizations.

*/ - CloudWatchLogsRoleArn?: string; + OrganizationEnabled?: boolean; /** - *

Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The - * value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully - * specified ARN to a key, or a globally unique identifier.

- *

CloudTrail also supports KMS multi-Region keys. For more information about multi-Region keys, - * see Using multi-Region keys in the Key Management Service Developer Guide.

- *

Examples:

- *
    - *
  • - *

    alias/MyAliasName

    - *
  • - *
  • - *

    arn:aws:kms:us-east-2:123456789012:alias/MyAliasName

    - *
  • - *
  • - *

    arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012

    - *
  • - *
  • - *

    12345678-1234-1234-1234-123456789012

    - *
  • - *
+ *

The retention period of the event data store, in days. You can set a retention period of up to 2555 days, + * the equivalent of seven years.

*/ - KmsKeyId?: string; + RetentionPeriod?: number; /** - *

Specifies whether the trail is created for all accounts in an organization in Organizations, or only for the current Amazon Web Services account. - * The default is false, and cannot be true unless the call is made on behalf of an Amazon Web Services account that is the management account for an organization in - * Organizations.

+ *

Specifies whether termination protection is enabled for the event data store. If termination protection is enabled, you + * cannot delete the event data store until termination protection is disabled.

*/ - IsOrganizationTrail?: boolean; + TerminationProtectionEnabled?: boolean; /** *

A list of tags.
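A sketch that exercises the CreateEventDataStoreRequest fields documented above (the store name, retention, and tag values are placeholders):

```ts
import { CloudTrailClient, CreateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({ region: "us-east-1" });
const response = await client.send(
  new CreateEventDataStoreCommand({
    Name: "my-event-data-store", // the only required field
    MultiRegionEnabled: true,
    RetentionPeriod: 365, // days, up to 2555
    TerminationProtectionEnabled: true,
    TagsList: [{ Key: "team", Value: "security" }],
  })
);
console.log(response.EventDataStoreArn, response.Status);
```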

@@ -785,104 +871,126 @@ export interface CreateTrailRequest { TagsList?: Tag[]; } -export namespace CreateTrailRequest { +export namespace CreateEventDataStoreRequest { /** * @internal */ - export const filterSensitiveLog = (obj: CreateTrailRequest): any => ({ + export const filterSensitiveLog = (obj: CreateEventDataStoreRequest): any => ({ ...obj, }); } -/** - *

Returns the objects or data listed below if successful. Otherwise, returns an error.

- */ -export interface CreateTrailResponse { +export enum EventDataStoreStatus { + CREATED = "CREATED", + ENABLED = "ENABLED", + PENDING_DELETION = "PENDING_DELETION", +} + +export interface CreateEventDataStoreResponse { /** - *

Specifies the name of the trail.

+ *

The ARN of the event data store.

*/ - Name?: string; + EventDataStoreArn?: string; /** - *

Specifies the name of the Amazon S3 bucket designated for publishing log files.

+ *

The name of the event data store.

*/ - S3BucketName?: string; + Name?: string; /** - *

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated - * for log file delivery. For more information, see Finding Your CloudTrail Log Files.

+ *

The status of event data store creation.

*/ - S3KeyPrefix?: string; + Status?: EventDataStoreStatus | string; /** - * @deprecated - * - *

This field is no longer in use. Use SnsTopicARN.

+ *

The advanced event selectors that were used to select the events for the data store.

*/ - SnsTopicName?: string; + AdvancedEventSelectors?: AdvancedEventSelector[]; /** - *

Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered. The format of a topic ARN is:

- *

- * arn:aws:sns:us-east-2:123456789012:MyTopic - *

+ *

Indicates whether the event data store collects events from all regions, or only from the region in which it was created.

*/ - SnsTopicARN?: string; + MultiRegionEnabled?: boolean; /** - *

Specifies whether the trail is publishing events from global services such as IAM to the log files.

+ *

Indicates whether an event data store is collecting logged events for an organization in Organizations.

*/ - IncludeGlobalServiceEvents?: boolean; + OrganizationEnabled?: boolean; /** - *

Specifies whether the trail exists in one region or in all regions.

+ *

The retention period of an event data store, in days.

*/ - IsMultiRegionTrail?: boolean; + RetentionPeriod?: number; /** - *

Specifies the ARN of the trail that was created. The format of a trail ARN - * is:

- *

- * arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail - *

+ *

Indicates whether termination protection is enabled for the event data store.

*/ - TrailARN?: string; + TerminationProtectionEnabled?: boolean; /** - *

Specifies whether log file integrity validation is enabled.

+ *

A list of tags.

*/ - LogFileValidationEnabled?: boolean; + TagsList?: Tag[]; /** - *

Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.

+ *

The timestamp that shows when the event data store was created.

*/ - CloudWatchLogsLogGroupArn?: string; + CreatedTimestamp?: Date; /** - *

Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

+ *

The timestamp that shows when an event data store was updated, if applicable. + * UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

*/ - CloudWatchLogsRoleArn?: string; + UpdatedTimestamp?: Date; +} +export namespace CreateEventDataStoreResponse { /** - *

Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. - * The value is a fully specified ARN to a KMS key in the following format.

- *

- * arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012 - *

+ * @internal */ - KmsKeyId?: string; + export const filterSensitiveLog = (obj: CreateEventDataStoreResponse): any => ({ + ...obj, + }); +} +/** + *

An event data store with that name already exists.

+ */ +export interface EventDataStoreAlreadyExistsException extends __SmithyException, $MetadataBearer { + name: "EventDataStoreAlreadyExistsException"; + $fault: "client"; /** - *

Specifies whether the trail is an organization trail.

+ *

Brief description of the exception returned by the request.

*/ - IsOrganizationTrail?: boolean; + Message?: string; } -export namespace CreateTrailResponse { +export namespace EventDataStoreAlreadyExistsException { /** * @internal */ - export const filterSensitiveLog = (obj: CreateTrailResponse): any => ({ + export const filterSensitiveLog = (obj: EventDataStoreAlreadyExistsException): any => ({ + ...obj, + }); +} + +/** + *

Your account has used the maximum number of event data stores.

+ */ +export interface EventDataStoreMaxLimitExceededException extends __SmithyException, $MetadataBearer { + name: "EventDataStoreMaxLimitExceededException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace EventDataStoreMaxLimitExceededException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EventDataStoreMaxLimitExceededException): any => ({ ...obj, }); } @@ -910,6 +1018,311 @@ export namespace InsufficientDependencyServiceAccessPermissionException { }); } +/** + *

This exception is thrown when Organizations is not configured to support all features. All features must be enabled in Organizations to support + * creating an organization trail. For more information, see + * Prepare For Creating a Trail For Your Organization.

+ */ +export interface OrganizationNotInAllFeaturesModeException extends __SmithyException, $MetadataBearer { + name: "OrganizationNotInAllFeaturesModeException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace OrganizationNotInAllFeaturesModeException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OrganizationNotInAllFeaturesModeException): any => ({ + ...obj, + }); +} + +/** + *

This exception is thrown when the request is made from an Amazon Web Services account that is not a member of an organization. + * To make this request, sign in using the credentials of an account that belongs to an organization.

+ */ +export interface OrganizationsNotInUseException extends __SmithyException, $MetadataBearer { + name: "OrganizationsNotInUseException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace OrganizationsNotInUseException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OrganizationsNotInUseException): any => ({ + ...obj, + }); +} + +/** + *

This exception is thrown when a call results in the InvalidClientTokenId error code. + * This can occur when you are creating or updating a trail to send notifications to an Amazon SNS topic that + * is in a suspended Amazon Web Services account.

+ */ +export interface CloudTrailInvalidClientTokenIdException extends __SmithyException, $MetadataBearer { + name: "CloudTrailInvalidClientTokenIdException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace CloudTrailInvalidClientTokenIdException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CloudTrailInvalidClientTokenIdException): any => ({ + ...obj, + }); +} + +/** + *

Cannot set a CloudWatch Logs delivery for this region.

+ */ +export interface CloudWatchLogsDeliveryUnavailableException extends __SmithyException, $MetadataBearer { + name: "CloudWatchLogsDeliveryUnavailableException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace CloudWatchLogsDeliveryUnavailableException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CloudWatchLogsDeliveryUnavailableException): any => ({ + ...obj, + }); +} + +/** + *

Specifies the settings for each trail.

+ */ +export interface CreateTrailRequest { + /** + *

Specifies the name of the trail. The name must meet the following requirements:

+ *
    + *
  • + *

    Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores (_), or dashes (-)

    + *
  • + *
  • + *

    Start with a letter or number, and end with a letter or number

    + *
  • + *
  • + *

    Be between 3 and 128 characters

    + *
  • + *
  • + *

    Have no adjacent periods, underscores or dashes. Names like my-_namespace + * and my--namespace are not valid.

    + *
  • + *
  • + *

    Not be in IP address format (for example, 192.168.5.4)

    + *
  • + *
+ */ + Name: string | undefined; + + /** + *

Specifies the name of the Amazon S3 bucket designated for publishing log files. See Amazon S3 Bucket Naming Requirements.

+ */ + S3BucketName: string | undefined; + + /** + *

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated + * for log file delivery. For more information, see Finding Your CloudTrail Log Files. The maximum length is 200 characters.

+ */ + S3KeyPrefix?: string; + + /** + *

Specifies the name of the Amazon SNS topic defined for notification of log file delivery. The maximum length is 256 characters.

+ */ + SnsTopicName?: string; + + /** + *

Specifies whether the trail is publishing events from global services such as IAM to the log files.

+ */ + IncludeGlobalServiceEvents?: boolean; + + /** + *

Specifies whether the trail is created in the current region or in all regions. The default is false, which creates a trail only in the region where you are signed in. As a best practice, consider + * creating trails that log events in all regions.

+ */ + IsMultiRegionTrail?: boolean; + + /** + *

Specifies whether log file integrity validation is enabled. The default is false.

+ * + *

When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail does + * not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. + * For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable + * it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on + * January 10. The same applies whenever you stop CloudTrail logging or delete a trail.

+ *
+ */ + EnableLogFileValidation?: boolean; + + /** + *

Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group + * to which CloudTrail logs will be delivered. Not required unless you specify CloudWatchLogsRoleArn.

+ */ + CloudWatchLogsLogGroupArn?: string; + + /** + *

Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

+ */ + CloudWatchLogsRoleArn?: string; + + /** + *

Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. The + * value can be an alias name prefixed by "alias/", a fully specified ARN to an alias, a fully + * specified ARN to a key, or a globally unique identifier.

+ *

CloudTrail also supports KMS multi-Region keys. For more information about multi-Region keys, + * see Using multi-Region keys in the Key Management Service Developer Guide.

+ *

Examples:

+ *
    + *
  • + *

    alias/MyAliasName

    + *
  • + *
  • + *

    arn:aws:kms:us-east-2:123456789012:alias/MyAliasName

    + *
  • + *
  • + *

    arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012

    + *
  • + *
  • + *

    12345678-1234-1234-1234-123456789012

    + *
  • + *
+ */ + KmsKeyId?: string; + + /** + *

Specifies whether the trail is created for all accounts in an organization in Organizations, or only for the current Amazon Web Services account. + * The default is false, and cannot be true unless the call is made on behalf of an Amazon Web Services account that is the management account for an organization in + * Organizations.

+ */ + IsOrganizationTrail?: boolean; + + /** + *

A list of tags.

+ */ + TagsList?: Tag[]; +} + +export namespace CreateTrailRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateTrailRequest): any => ({ + ...obj, + }); +} + +/** + *

Returns the objects or data listed below if successful. Otherwise, returns an error.

+ */ +export interface CreateTrailResponse { + /** + *

Specifies the name of the trail.

+ */ + Name?: string; + + /** + *

Specifies the name of the Amazon S3 bucket designated for publishing log files.

+ */ + S3BucketName?: string; + + /** + *

Specifies the Amazon S3 key prefix that comes after the name of the bucket you have designated + * for log file delivery. For more information, see Finding Your CloudTrail Log Files.

+ */ + S3KeyPrefix?: string; + + /** + * @deprecated + * + *

This field is no longer in use. Use SnsTopicARN.

+ */ + SnsTopicName?: string; + + /** + *

Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications when log files are delivered. The format of a topic ARN is:

+ *

+ * arn:aws:sns:us-east-2:123456789012:MyTopic + *

+ */ + SnsTopicARN?: string; + + /** + *

Specifies whether the trail is publishing events from global services such as IAM to the log files.

+ */ + IncludeGlobalServiceEvents?: boolean; + + /** + *

Specifies whether the trail exists in one region or in all regions.

+ */ + IsMultiRegionTrail?: boolean; + + /** + *

Specifies the ARN of the trail that was created. The format of a trail ARN + * is:

+ *

+ * arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail + *

+ */ + TrailARN?: string; + + /** + *

Specifies whether log file integrity validation is enabled.

+ */ + LogFileValidationEnabled?: boolean; + + /** + *

Specifies the Amazon Resource Name (ARN) of the log group to which CloudTrail logs will be delivered.

+ */ + CloudWatchLogsLogGroupArn?: string; + + /** + *

Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group.

+ */ + CloudWatchLogsRoleArn?: string; + + /** + *

Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. + * The value is a fully specified ARN to a KMS key in the following format.

+ *

+ * arn:aws:kms:us-east-2:123456789012:key/12345678-1234-1234-1234-123456789012 + *

+ */ + KmsKeyId?: string; + + /** + *

Specifies whether the trail is an organization trail.

+ */ + IsOrganizationTrail?: boolean; +} + +export namespace CreateTrailResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateTrailResponse): any => ({ + ...obj, + }); +} + /** *

This exception is thrown when the policy on the S3 bucket or KMS key is not sufficient.

*/ @@ -1142,56 +1555,12 @@ export namespace KmsException { } /** - * @deprecated - * - *

This exception is no longer in use.

- */ -export interface KmsKeyDisabledException extends __SmithyException, $MetadataBearer { - name: "KmsKeyDisabledException"; - $fault: "client"; - /** - *

Brief description of the exception returned by the request.

- */ - Message?: string; -} - -export namespace KmsKeyDisabledException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: KmsKeyDisabledException): any => ({ - ...obj, - }); -} - -/** - *

This exception is thrown when the KMS key does not exist, when the S3 bucket and the - * KMS key are not in the same region, or when the KMS key associated with the Amazon SNS - * topic either does not exist or is not in the same region.

- */ -export interface KmsKeyNotFoundException extends __SmithyException, $MetadataBearer { - name: "KmsKeyNotFoundException"; - $fault: "client"; - /** - *

Brief description of the exception returned by the request.

- */ - Message?: string; -} - -export namespace KmsKeyNotFoundException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: KmsKeyNotFoundException): any => ({ - ...obj, - }); -} - -/** - *

This exception is thrown when the maximum number of trails is reached.

+ * @deprecated + * + *

This exception is no longer in use.

*/ -export interface MaximumNumberOfTrailsExceededException extends __SmithyException, $MetadataBearer { - name: "MaximumNumberOfTrailsExceededException"; +export interface KmsKeyDisabledException extends __SmithyException, $MetadataBearer { + name: "KmsKeyDisabledException"; $fault: "client"; /** *

Brief description of the exception returned by the request.

@@ -1199,22 +1568,22 @@ export interface MaximumNumberOfTrailsExceededException extends __SmithyExceptio Message?: string; } -export namespace MaximumNumberOfTrailsExceededException { +export namespace KmsKeyDisabledException { /** * @internal */ - export const filterSensitiveLog = (obj: MaximumNumberOfTrailsExceededException): any => ({ + export const filterSensitiveLog = (obj: KmsKeyDisabledException): any => ({ ...obj, }); } /** - *

This exception is thrown when Organizations is not configured to support all features. All features must be enabled in Organizations to support - * creating an organization trail. For more information, see - * Prepare For Creating a Trail For Your Organization.

+ *

This exception is thrown when the KMS key does not exist, when the S3 bucket and the + * KMS key are not in the same region, or when the KMS key associated with the Amazon SNS + * topic either does not exist or is not in the same region.

*/ -export interface OrganizationNotInAllFeaturesModeException extends __SmithyException, $MetadataBearer { - name: "OrganizationNotInAllFeaturesModeException"; +export interface KmsKeyNotFoundException extends __SmithyException, $MetadataBearer { + name: "KmsKeyNotFoundException"; $fault: "client"; /** *

Brief description of the exception returned by the request.

@@ -1222,21 +1591,20 @@ export interface OrganizationNotInAllFeaturesModeException extends __SmithyExcep Message?: string; } -export namespace OrganizationNotInAllFeaturesModeException { +export namespace KmsKeyNotFoundException { /** * @internal */ - export const filterSensitiveLog = (obj: OrganizationNotInAllFeaturesModeException): any => ({ + export const filterSensitiveLog = (obj: KmsKeyNotFoundException): any => ({ ...obj, }); } /** - *

This exception is thrown when the request is made from an Amazon Web Services account that is not a member of an organization. - * To make this request, sign in using the credentials of an account that belongs to an organization.

+ *

This exception is thrown when the maximum number of trails is reached.

*/ -export interface OrganizationsNotInUseException extends __SmithyException, $MetadataBearer { - name: "OrganizationsNotInUseException"; +export interface MaximumNumberOfTrailsExceededException extends __SmithyException, $MetadataBearer { + name: "MaximumNumberOfTrailsExceededException"; $fault: "client"; /** *

Brief description of the exception returned by the request.

@@ -1244,11 +1612,11 @@ export interface OrganizationsNotInUseException extends __SmithyException, $Meta Message?: string; } -export namespace OrganizationsNotInUseException { +export namespace MaximumNumberOfTrailsExceededException { /** * @internal */ - export const filterSensitiveLog = (obj: OrganizationsNotInUseException): any => ({ + export const filterSensitiveLog = (obj: MaximumNumberOfTrailsExceededException): any => ({ ...obj, }); } @@ -1316,13 +1684,38 @@ export namespace TrailNotProvidedException { }); } +export interface DeleteEventDataStoreRequest { + /** + *

The ARN (or the ID suffix of the ARN) of the event data store to delete.

+ */ + EventDataStore: string | undefined; +} + +export namespace DeleteEventDataStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEventDataStoreRequest): any => ({ + ...obj, + }); +} + +export interface DeleteEventDataStoreResponse {} + +export namespace DeleteEventDataStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEventDataStoreResponse): any => ({ + ...obj, + }); +} + /** - *
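For illustration only, a rough sketch of calling the new DeleteEventDataStore operation; the ARN is a placeholder, and the error-name check relies on the exception shapes added in this change. Termination protection has to be disabled first, otherwise the call fails with the exception documented just below:

import { CloudTrailClient, DeleteEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

export async function deleteEventDataStore(eventDataStoreArn: string): Promise<void> {
  try {
    await client.send(new DeleteEventDataStoreCommand({ EventDataStore: eventDataStoreArn }));
  } catch (err: any) {
    if (err.name === "EventDataStoreTerminationProtectedException") {
      // Termination protection is still enabled; it has to be turned off
      // (UpdateEventDataStore) before the store can be deleted.
    }
    throw err;
  }
}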

This exception is thrown when the specified resource is not ready for an operation. - * This can occur when you try to run an operation on a trail before CloudTrail has time to fully load the trail. - * If this exception occurs, wait a few minutes, and then try the operation again.

+ *

The event data store cannot be deleted because termination protection is enabled for it.

*/ -export interface ConflictException extends __SmithyException, $MetadataBearer { - name: "ConflictException"; +export interface EventDataStoreTerminationProtectedException extends __SmithyException, $MetadataBearer { + name: "EventDataStoreTerminationProtectedException"; $fault: "client"; /** *

Brief description of the exception returned by the request.

@@ -1330,11 +1723,11 @@ export interface ConflictException extends __SmithyException, $MetadataBearer { Message?: string; } -export namespace ConflictException { +export namespace EventDataStoreTerminationProtectedException { /** * @internal */ - export const filterSensitiveLog = (obj: ConflictException): any => ({ + export const filterSensitiveLog = (obj: EventDataStoreTerminationProtectedException): any => ({ ...obj, }); } @@ -1417,6 +1810,101 @@ export namespace TrailNotFoundException { }); } +export interface DescribeQueryRequest { + /** + *

The ARN (or the ID suffix of the ARN) of an event data store on which the specified query was run.

+ */ + EventDataStore: string | undefined; + + /** + *

The query ID.

+ */ + QueryId: string | undefined; +} + +export namespace DescribeQueryRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeQueryRequest): any => ({ + ...obj, + }); +} + +/** + *

Gets metadata about a query, including the number of events that were matched, the total number of events scanned, the query run time + * in milliseconds, and the query's creation time.

+ */ +export interface QueryStatisticsForDescribeQuery { + /** + *

The number of events that matched a query.

+ */ + EventsMatched?: number; + + /** + *

The number of events that the query scanned in the event data store.

+ */ + EventsScanned?: number; + + /** + *

The query's run time, in milliseconds.

+ */ + ExecutionTimeInMillis?: number; + + /** + *

The creation time of the query.

+ */ + CreationTime?: Date; +} + +export namespace QueryStatisticsForDescribeQuery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: QueryStatisticsForDescribeQuery): any => ({ + ...obj, + }); +} + +export interface DescribeQueryResponse { + /** + *

The ID of the query.

+ */ + QueryId?: string; + + /** + *

The SQL code of a query.

+ */ + QueryString?: string; + + /** + *

The status of a query. Values for QueryStatus include QUEUED, RUNNING, + * FINISHED, FAILED, or CANCELLED. + *

+ */ + QueryStatus?: QueryStatus | string; + + /** + *

Metadata about a query, including the number of events that were matched, the total number of events scanned, the query run time + * in milliseconds, and the query's creation time.

+ */ + QueryStatistics?: QueryStatisticsForDescribeQuery; + + /** + *

The error message returned if a query failed.

+ */ + ErrorMessage?: string; +} + +export namespace DescribeQueryResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeQueryResponse): any => ({ + ...obj, + }); +} + /** *
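A small sketch of how DescribeQuery might be polled until a query settles; the backoff interval is arbitrary, and the ARN and query ID are assumed to come from an earlier StartQuery call:

import { CloudTrailClient, DescribeQueryCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Poll a query until it leaves the QUEUED/RUNNING states, then return its
// final status, statistics, and error message (if any).
export async function waitForQuery(eventDataStoreArn: string, queryId: string) {
  for (;;) {
    const { QueryStatus, QueryStatistics, ErrorMessage } = await client.send(
      new DescribeQueryCommand({ EventDataStore: eventDataStoreArn, QueryId: queryId })
    );
    if (QueryStatus !== "QUEUED" && QueryStatus !== "RUNNING") {
      return { QueryStatus, QueryStatistics, ErrorMessage };
    }
    await new Promise((resolve) => setTimeout(resolve, 2000)); // back off before polling again
  }
}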

Returns information about the trail.

*/ @@ -1592,6 +2080,83 @@ export namespace DescribeTrailsResponse { }); } +export interface GetEventDataStoreRequest { + /** + *

The ARN (or ID suffix of the ARN) of the event data store about which you want information.

+ */ + EventDataStore: string | undefined; +} + +export namespace GetEventDataStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEventDataStoreRequest): any => ({ + ...obj, + }); +} + +export interface GetEventDataStoreResponse { + /** + *

The event data store Amazon Resource Name (ARN).

+ */ + EventDataStoreArn?: string; + + /** + *

The name of the event data store.

+ */ + Name?: string; + + /** + *

The status of an event data store. Values can be ENABLED and PENDING_DELETION.

+ */ + Status?: EventDataStoreStatus | string; + + /** + *

The advanced event selectors used to select events for the data store.

+ */ + AdvancedEventSelectors?: AdvancedEventSelector[]; + + /** + *

Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

+ */ + MultiRegionEnabled?: boolean; + + /** + *

Indicates whether an event data store is collecting logged events for an organization in Organizations.

+ */ + OrganizationEnabled?: boolean; + + /** + *

The retention period of the event data store, in days.

+ */ + RetentionPeriod?: number; + + /** + *

Indicates whether termination protection is enabled for the event data store.

+ */ + TerminationProtectionEnabled?: boolean; + + /** + *

The timestamp of the event data store's creation.

+ */ + CreatedTimestamp?: Date; + + /** + *

Shows the time that an event data store was updated, if applicable. UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

+ */ + UpdatedTimestamp?: Date; +} + +export namespace GetEventDataStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEventDataStoreResponse): any => ({ + ...obj, + }); +} + export interface GetEventSelectorsRequest { /** *

Specifies the name of the trail or trail ARN. If you specify a trail name, the @@ -1923,68 +2488,182 @@ export interface GetInsightSelectorsRequest { * arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail *

*/ - TrailName: string | undefined; + TrailName: string | undefined; +} + +export namespace GetInsightSelectorsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetInsightSelectorsRequest): any => ({ + ...obj, + }); +} + +export enum InsightType { + ApiCallRateInsight = "ApiCallRateInsight", + ApiErrorRateInsight = "ApiErrorRateInsight", +} + +/** + *

A JSON string that contains a list of insight types that are logged on a trail.

+ */ +export interface InsightSelector { + /** + *

The type of insights to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid insight types.

+ */ + InsightType?: InsightType | string; +} + +export namespace InsightSelector { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InsightSelector): any => ({ + ...obj, + }); +} + +export interface GetInsightSelectorsResponse { + /** + *

The Amazon Resource Name (ARN) of a trail for which you want to get Insights selectors.

+ */ + TrailARN?: string; + + /** + *

A JSON string that contains the insight types you want to log on a trail. In this release, ApiErrorRateInsight and + * ApiCallRateInsight are supported as insight types.

+ */ + InsightSelectors?: InsightSelector[]; +} + +export namespace GetInsightSelectorsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetInsightSelectorsResponse): any => ({ + ...obj, + }); +} + +/** + *
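For context, a brief sketch of writing and reading Insights selectors with both supported insight types; the trail name is a placeholder:

import {
  CloudTrailClient,
  GetInsightSelectorsCommand,
  PutInsightSelectorsCommand,
} from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Enable both Insights types on a trail, then read the configuration back.
export async function enableInsights(trailName: string) {
  await client.send(
    new PutInsightSelectorsCommand({
      TrailName: trailName,
      InsightSelectors: [{ InsightType: "ApiCallRateInsight" }, { InsightType: "ApiErrorRateInsight" }],
    })
  );
  return client.send(new GetInsightSelectorsCommand({ TrailName: trailName }));
}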

If you run GetInsightSelectors on a trail that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException.

+ */ +export interface InsightNotEnabledException extends __SmithyException, $MetadataBearer { + name: "InsightNotEnabledException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace InsightNotEnabledException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InsightNotEnabledException): any => ({ + ...obj, + }); +} + +export interface GetQueryResultsRequest { + /** + *

The ARN (or ID suffix of the ARN) of the event data store against which the query was run.

+ */ + EventDataStore: string | undefined; + + /** + *

The ID of the query for which you want to get results.

+ */ + QueryId: string | undefined; + + /** + *

A token you can use to get the next page of query results.

+ */ + NextToken?: string; + + /** + *

The maximum number of query results to display on a single page.

+ */ + MaxQueryResults?: number; } -export namespace GetInsightSelectorsRequest { +export namespace GetQueryResultsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: GetInsightSelectorsRequest): any => ({ + export const filterSensitiveLog = (obj: GetQueryResultsRequest): any => ({ ...obj, }); } -export enum InsightType { - ApiCallRateInsight = "ApiCallRateInsight", - ApiErrorRateInsight = "ApiErrorRateInsight", -} - /** - *

A JSON string that contains a list of insight types that are logged on a trail.

+ *

Metadata about a query, such as the number of results.

*/ -export interface InsightSelector { +export interface QueryStatistics { /** - *

The type of Insights events to log on a trail. The valid Insights type in this release is ApiCallRateInsight.

+ *

The number of results returned in the current page of query results.

*/ - InsightType?: InsightType | string; + ResultsCount?: number; + + /** + *

The total number of results returned by a query.

+ */ + TotalResultsCount?: number; } -export namespace InsightSelector { +export namespace QueryStatistics { /** * @internal */ - export const filterSensitiveLog = (obj: InsightSelector): any => ({ + export const filterSensitiveLog = (obj: QueryStatistics): any => ({ ...obj, }); } -export interface GetInsightSelectorsResponse { +export interface GetQueryResultsResponse { /** - *

The Amazon Resource Name (ARN) of a trail for which you want to get Insights selectors.

+ *

The status of the query. Values include QUEUED, RUNNING, FINISHED, FAILED, + * or CANCELLED.

*/ - TrailARN?: string; + QueryStatus?: QueryStatus | string; /** - *

A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight is supported as an insight type.

+ *

Shows the count of query results.

*/ - InsightSelectors?: InsightSelector[]; + QueryStatistics?: QueryStatistics; + + /** + *

Contains the individual event results of the query.

+ */ + QueryResultRows?: { [key: string]: string }[][]; + + /** + *

A token you can use to get the next page of query results.

+ */ + NextToken?: string; + + /** + *

The error message returned if a query failed.

+ */ + ErrorMessage?: string; } -export namespace GetInsightSelectorsResponse { +export namespace GetQueryResultsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: GetInsightSelectorsResponse): any => ({ + export const filterSensitiveLog = (obj: GetQueryResultsResponse): any => ({ ...obj, }); } /** - *
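A minimal sketch of draining all result rows for a finished query by following NextToken by hand; the paginateGetQueryResults helper added elsewhere in this change wraps the same loop. The ARN, query ID, and page size are placeholders:

import { CloudTrailClient, GetQueryResultsCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Collect every row of a finished query, one page at a time.
export async function getAllQueryRows(eventDataStoreArn: string, queryId: string) {
  const rows: { [key: string]: string }[][] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new GetQueryResultsCommand({
        EventDataStore: eventDataStoreArn,
        QueryId: queryId,
        NextToken: nextToken,
        MaxQueryResults: 100,
      })
    );
    rows.push(...(page.QueryResultRows ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return rows;
}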

If you run GetInsightSelectors on a trail that does not have Insights events enabled, the operation throws the exception InsightNotEnabledException.

+ *

This exception is thrown if the limit specified is not valid.

*/ -export interface InsightNotEnabledException extends __SmithyException, $MetadataBearer { - name: "InsightNotEnabledException"; +export interface InvalidMaxResultsException extends __SmithyException, $MetadataBearer { + name: "InvalidMaxResultsException"; $fault: "client"; /** *

Brief description of the exception returned by the request.

@@ -1992,11 +2671,32 @@ export interface InsightNotEnabledException extends __SmithyException, $Metadata Message?: string; } -export namespace InsightNotEnabledException { +export namespace InvalidMaxResultsException { /** * @internal */ - export const filterSensitiveLog = (obj: InsightNotEnabledException): any => ({ + export const filterSensitiveLog = (obj: InvalidMaxResultsException): any => ({ + ...obj, + }); +} + +/** + *

This exception is thrown when the token is not valid, or when the token was previously used in a request with different parameters.

+ */ +export interface InvalidNextTokenException extends __SmithyException, $MetadataBearer { + name: "InvalidNextTokenException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace InvalidNextTokenException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InvalidNextTokenException): any => ({ ...obj, }); } @@ -2173,6 +2873,115 @@ export namespace GetTrailStatusResponse { }); } +export interface ListEventDataStoresRequest { + /** + *

A token you can use to get the next page of event data store results.

+ */ + NextToken?: string; + + /** + *

The maximum number of event data stores to display on a single page.

+ */ + MaxResults?: number; +} + +export namespace ListEventDataStoresRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListEventDataStoresRequest): any => ({ + ...obj, + }); +} + +/** + *

A storage lake of event data against which you can run complex SQL-based queries. An event data store can include events + * that you have logged on your account from the last 90 to 2555 days + * (about three months to seven years). To select events for an event data store, + * use advanced event selectors.

+ */ +export interface EventDataStore { + /** + *

The ARN of the event data store.

+ */ + EventDataStoreArn?: string; + + /** + *

The name of the event data store.

+ */ + Name?: string; + + /** + *

Indicates whether the event data store is protected from termination.

+ */ + TerminationProtectionEnabled?: boolean; + + /** + *

The status of an event data store. Values are ENABLED and PENDING_DELETION.

+ */ + Status?: EventDataStoreStatus | string; + + /** + *

The advanced event selectors that were used to select events for the data store.

+ */ + AdvancedEventSelectors?: AdvancedEventSelector[]; + + /** + *

Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

+ */ + MultiRegionEnabled?: boolean; + + /** + *

Indicates whether an event data store is collecting logged events for an organization in Organizations.

+ */ + OrganizationEnabled?: boolean; + + /** + *

The retention period, in days.

+ */ + RetentionPeriod?: number; + + /** + *

The timestamp of the event data store's creation.

+ */ + CreatedTimestamp?: Date; + + /** + *

The timestamp showing when an event data store was updated, if applicable. UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

+ */ + UpdatedTimestamp?: Date; +} + +export namespace EventDataStore { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EventDataStore): any => ({ + ...obj, + }); +} + +export interface ListEventDataStoresResponse { + /** + *

Contains information about event data stores in the account, in the current region.

+ */ + EventDataStores?: EventDataStore[]; + + /** + *

A token you can use to get the next page of results.

+ */ + NextToken?: string; +} + +export namespace ListEventDataStoresResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListEventDataStoresResponse): any => ({ + ...obj, + }); +} + /** *
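A short sketch of listing every event data store in the current region. It assumes the ListEventDataStores paginator is re-exported from the package entry point, as the existing CloudTrail paginators are, and the page size is arbitrary:

import {
  CloudTrailClient,
  EventDataStore,
  paginateListEventDataStores,
} from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Let the paginator follow NextToken and set MaxResults on each request.
export async function listAllEventDataStores(): Promise<EventDataStore[]> {
  const stores: EventDataStore[] = [];
  for await (const page of paginateListEventDataStores({ client, pageSize: 10 }, {})) {
    stores.push(...(page.EventDataStores ?? []));
  }
  return stores;
}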

Occurs if the timestamp values are not valid. Either the start time occurs after the end time, or the time range is outside the range of possible values.

*/ @@ -2256,53 +3065,192 @@ export interface PublicKey { Value?: Uint8Array; /** - *

The starting time of validity of the public key.

+ *

The starting time of validity of the public key.

+ */ + ValidityStartTime?: Date; + + /** + *

The ending time of validity of the public key.

+ */ + ValidityEndTime?: Date; + + /** + *

The fingerprint of the public key.

+ */ + Fingerprint?: string; +} + +export namespace PublicKey { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PublicKey): any => ({ + ...obj, + }); +} + +/** + *

Returns the objects or data listed below if successful. Otherwise, returns an error.

+ */ +export interface ListPublicKeysResponse { + /** + *

Contains an array of PublicKey objects.

+ * + *

The returned public keys may have validity time ranges that overlap.

+ *
+ */ + PublicKeyList?: PublicKey[]; + + /** + *

Reserved for future use.

+ */ + NextToken?: string; +} + +export namespace ListPublicKeysResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPublicKeysResponse): any => ({ + ...obj, + }); +} + +/** + *

The date range specified for the query is not valid. For more information + * about writing a query, see Create + * or edit a query in the CloudTrail User Guide.

+ */ +export interface InvalidDateRangeException extends __SmithyException, $MetadataBearer { + name: "InvalidDateRangeException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace InvalidDateRangeException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InvalidDateRangeException): any => ({ + ...obj, + }); +} + +/** + *

The query status is not valid for the operation.

+ */ +export interface InvalidQueryStatusException extends __SmithyException, $MetadataBearer { + name: "InvalidQueryStatusException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace InvalidQueryStatusException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InvalidQueryStatusException): any => ({ + ...obj, + }); +} + +export interface ListQueriesRequest { + /** + *

The ARN (or the ID suffix of the ARN) of an event data store on which queries were run.

+ */ + EventDataStore: string | undefined; + + /** + *

A token you can use to get the next page of results.

+ */ + NextToken?: string; + + /** + *

The maximum number of queries to show on a page.

+ */ + MaxResults?: number; + + /** + *

Use with EndTime to bound a ListQueries request, and limit its results to only those queries run + * within a specified time period.

+ */ + StartTime?: Date; + + /** + *

Use with StartTime to bound a ListQueries request, and limit its results to only those queries run + * within a specified time period.

+ */ + EndTime?: Date; + + /** + *

The status of queries that you want to return in results. Valid values for QueryStatus include QUEUED, RUNNING, + * FINISHED, FAILED, or CANCELLED.

+ */ + QueryStatus?: QueryStatus | string; +} + +export namespace ListQueriesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListQueriesRequest): any => ({ + ...obj, + }); +} + +/** + *
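For illustration, a sketch of listing queries filtered by time range and status against one event data store; the 24-hour window and the other values are placeholder choices, not part of the patch:

import { CloudTrailClient, ListQueriesCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// List queries that finished in the last 24 hours on a single event data store.
export async function listRecentFinishedQueries(eventDataStoreArn: string) {
  return client.send(
    new ListQueriesCommand({
      EventDataStore: eventDataStoreArn,
      StartTime: new Date(Date.now() - 24 * 60 * 60 * 1000),
      EndTime: new Date(),
      QueryStatus: "FINISHED",
      MaxResults: 50,
    })
  );
}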

A SQL string of criteria about events that you want to collect in an event data store.

+ */ +export interface Query { + /** + *

The ID of a query.

*/ - ValidityStartTime?: Date; + QueryId?: string; /** - *

The ending time of validity of the public key.

+ *

The status of the query. This can be QUEUED, RUNNING, FINISHED, FAILED, + * or CANCELLED.

*/ - ValidityEndTime?: Date; + QueryStatus?: QueryStatus | string; /** - *

The fingerprint of the public key.

+ *

The creation time of a query.

*/ - Fingerprint?: string; + CreationTime?: Date; } -export namespace PublicKey { +export namespace Query { /** * @internal */ - export const filterSensitiveLog = (obj: PublicKey): any => ({ + export const filterSensitiveLog = (obj: Query): any => ({ ...obj, }); } -/** - *

Returns the objects or data listed below if successful. Otherwise, returns an error.

- */ -export interface ListPublicKeysResponse { +export interface ListQueriesResponse { /** - *

Contains an array of PublicKey objects.

- * - *

The returned public keys may have validity time ranges that overlap.

- *
+ *

Lists matching query results, and shows query ID, status, and creation time of each query.

*/ - PublicKeyList?: PublicKey[]; + Queries?: Query[]; /** - *

Reserved for future use.

+ *

A token you can use to get the next page of results.

*/ NextToken?: string; } -export namespace ListPublicKeysResponse { +export namespace ListQueriesResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListPublicKeysResponse): any => ({ + export const filterSensitiveLog = (obj: ListQueriesResponse): any => ({ ...obj, }); } @@ -2497,48 +3445,6 @@ export namespace InvalidLookupAttributesException { }); } -/** - *

This exception is thrown if the limit specified is not valid.

- */ -export interface InvalidMaxResultsException extends __SmithyException, $MetadataBearer { - name: "InvalidMaxResultsException"; - $fault: "client"; - /** - *

Brief description of the exception returned by the request.

- */ - Message?: string; -} - -export namespace InvalidMaxResultsException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: InvalidMaxResultsException): any => ({ - ...obj, - }); -} - -/** - *

A token that is not valid, or a token that was previously used in a request with different parameters. This exception is thrown if the token is not valid.

- */ -export interface InvalidNextTokenException extends __SmithyException, $MetadataBearer { - name: "InvalidNextTokenException"; - $fault: "client"; - /** - *

Brief description of the exception returned by the request.

- */ - Message?: string; -} - -export namespace InvalidNextTokenException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: InvalidNextTokenException): any => ({ - ...obj, - }); -} - export enum EventCategory { Insight = "insight", } @@ -2898,7 +3804,7 @@ export interface PutInsightSelectorsRequest { TrailName: string | undefined; /** - *

A JSON string that contains the Insights types that you want to log on a trail. The valid Insights type in this release is ApiCallRateInsight.

+ *

A JSON string that contains the insight types you want to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid insight types.

*/ InsightSelectors: InsightSelector[] | undefined; } @@ -2919,7 +3825,8 @@ export interface PutInsightSelectorsResponse { TrailARN?: string; /** - *

A JSON string that contains the Insights event types that you want to log on a trail. The valid Insights type in this release is ApiCallRateInsight.

+ *

A JSON string that contains the Insights event types that you want to log on a trail. The valid Insights types in this release are + * ApiErrorRateInsight and ApiCallRateInsight.

*/ InsightSelectors?: InsightSelector[]; } @@ -2948,7 +3855,7 @@ export interface RemoveTagsRequest { /** *

Specifies a list of tags to be removed.

*/ - TagsList?: Tag[]; + TagsList: Tag[] | undefined; } export namespace RemoveTagsRequest { @@ -2974,6 +3881,106 @@ export namespace RemoveTagsResponse { }); } +/** + *

The event data store is not in a status that supports the operation.

+ */ +export interface InvalidEventDataStoreStatusException extends __SmithyException, $MetadataBearer { + name: "InvalidEventDataStoreStatusException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace InvalidEventDataStoreStatusException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InvalidEventDataStoreStatusException): any => ({ + ...obj, + }); +} + +export interface RestoreEventDataStoreRequest { + /** + *

The ARN (or the ID suffix of the ARN) of the event data store that you want to restore.

+ */ + EventDataStore: string | undefined; +} + +export namespace RestoreEventDataStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreEventDataStoreRequest): any => ({ + ...obj, + }); +} + +export interface RestoreEventDataStoreResponse { + /** + *

The event data store ARN.

+ */ + EventDataStoreArn?: string; + + /** + *

The name of the event data store.

+ */ + Name?: string; + + /** + *

The status of the event data store.

+ */ + Status?: EventDataStoreStatus | string; + + /** + *

The advanced event selectors that were used to select events.

+ */ + AdvancedEventSelectors?: AdvancedEventSelector[]; + + /** + *

Indicates whether the event data store is collecting events from all regions, or only from the region in which the event data + * store was created.

+ */ + MultiRegionEnabled?: boolean; + + /** + *

Indicates whether an event data store is collecting logged events for an organization in Organizations.

+ */ + OrganizationEnabled?: boolean; + + /** + *

The retention period, in days.

+ */ + RetentionPeriod?: number; + + /** + *

Indicates that termination protection is enabled and the event data store cannot be automatically deleted.

+ */ + TerminationProtectionEnabled?: boolean; + + /** + *

The timestamp of an event data store's creation.

+ */ + CreatedTimestamp?: Date; + + /** + *

The timestamp that shows when an event data store was updated, if applicable. + * UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

+ */ + UpdatedTimestamp?: Date; +} + +export namespace RestoreEventDataStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreEventDataStoreResponse): any => ({ + ...obj, + }); +} + /** *

The request to CloudTrail to start logging Amazon Web Services API calls for an account.

*/ @@ -3011,6 +4018,83 @@ export namespace StartLoggingResponse { }); } +/** + *

The query that was submitted has validation errors, or uses incorrect syntax or unsupported keywords. For more information + * about writing a query, see Create + * or edit a query in the CloudTrail User Guide.

+ */ +export interface InvalidQueryStatementException extends __SmithyException, $MetadataBearer { + name: "InvalidQueryStatementException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace InvalidQueryStatementException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InvalidQueryStatementException): any => ({ + ...obj, + }); +} + +/** + *

You are already running the maximum number of concurrent queries. Wait a minute for some queries to finish, and then + * run the query again.

+ */ +export interface MaxConcurrentQueriesException extends __SmithyException, $MetadataBearer { + name: "MaxConcurrentQueriesException"; + $fault: "client"; + /** + *

Brief description of the exception returned by the request.

+ */ + Message?: string; +} + +export namespace MaxConcurrentQueriesException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MaxConcurrentQueriesException): any => ({ + ...obj, + }); +} + +export interface StartQueryRequest { + /** + *

The SQL code of your query.

+ */ + QueryStatement: string | undefined; +} + +export namespace StartQueryRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartQueryRequest): any => ({ + ...obj, + }); +} + +export interface StartQueryResponse { + /** + *

The ID of the started query.

+ */ + QueryId?: string; +} + +export namespace StartQueryResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartQueryResponse): any => ({ + ...obj, + }); +} + /** *
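A minimal sketch of starting a query. Note that the event data store is addressed by the ID used in the SQL FROM clause rather than by a request field, and the SQL itself is only illustrative:

import { CloudTrailClient, StartQueryCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Start a query and return the QueryId, which DescribeQuery and
// GetQueryResults use to track and fetch the results.
export async function startSampleQuery(eventDataStoreId: string) {
  const { QueryId } = await client.send(
    new StartQueryCommand({
      QueryStatement: `SELECT eventID, eventName, eventTime FROM ${eventDataStoreId} WHERE eventTime > '2021-11-01 00:00:00'`,
    })
  );
  return QueryId;
}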

Passes the request to CloudTrail to stop logging Amazon Web Services API calls for the specified account.

*/ @@ -3048,6 +4132,113 @@ export namespace StopLoggingResponse { }); } +export interface UpdateEventDataStoreRequest { + /** + *

The ARN (or the ID suffix of the ARN) of the event data store that you want to update.

+ */ + EventDataStore: string | undefined; + + /** + *

The event data store name.

+ */ + Name?: string; + + /** + *

The advanced event selectors used to select events for the event data store.

+ */ + AdvancedEventSelectors?: AdvancedEventSelector[]; + + /** + *

Specifies whether an event data store collects events from all regions, or only from the region in which it was created.

+ */ + MultiRegionEnabled?: boolean; + + /** + *

Specifies whether an event data store collects events logged for an organization in Organizations.

+ */ + OrganizationEnabled?: boolean; + + /** + *

The retention period, in days.

+ */ + RetentionPeriod?: number; + + /** + *

Indicates that termination protection is enabled and the event data store cannot be automatically deleted.

+ */ + TerminationProtectionEnabled?: boolean; +} + +export namespace UpdateEventDataStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEventDataStoreRequest): any => ({ + ...obj, + }); +} + +export interface UpdateEventDataStoreResponse { + /** + *

The ARN of the event data store.

+ */ + EventDataStoreArn?: string; + + /** + *

The name of the event data store.

+ */ + Name?: string; + + /** + *

The status of an event data store. Values can be ENABLED and PENDING_DELETION.

+ */ + Status?: EventDataStoreStatus | string; + + /** + *

The advanced event selectors that are applied to the event data store.

+ */ + AdvancedEventSelectors?: AdvancedEventSelector[]; + + /** + *

Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

+ */ + MultiRegionEnabled?: boolean; + + /** + *

Indicates whether an event data store is collecting logged events for an organization in Organizations.

+ */ + OrganizationEnabled?: boolean; + + /** + *

The retention period, in days.

+ */ + RetentionPeriod?: number; + + /** + *

Indicates whether termination protection is enabled for the event data store.

+ */ + TerminationProtectionEnabled?: boolean; + + /** + *

The timestamp that shows when an event data store was first created.

+ */ + CreatedTimestamp?: Date; + + /** + *

The timestamp that shows when the event data store was last updated. UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

+ */ + UpdatedTimestamp?: Date; +} + +export namespace UpdateEventDataStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEventDataStoreResponse): any => ({ + ...obj, + }); +} + /** *
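Lastly, a brief sketch of updating an event data store, for example to change retention and clear termination protection before deletion. Only the fields being changed are supplied here, and the ARN and values are placeholders:

import { CloudTrailClient, UpdateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Extend retention to one year and turn off termination protection so the
// store can be deleted later.
export async function relaxEventDataStore(eventDataStoreArn: string) {
  return client.send(
    new UpdateEventDataStoreCommand({
      EventDataStore: eventDataStoreArn,
      RetentionPeriod: 365,
      TerminationProtectionEnabled: false,
    })
  );
}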

Specifies settings to update for the trail.

*/ diff --git a/clients/client-cloudtrail/src/pagination/GetQueryResultsPaginator.ts b/clients/client-cloudtrail/src/pagination/GetQueryResultsPaginator.ts new file mode 100644 index 000000000000..75c844e2901a --- /dev/null +++ b/clients/client-cloudtrail/src/pagination/GetQueryResultsPaginator.ts @@ -0,0 +1,58 @@ +import { Paginator } from "@aws-sdk/types"; + +import { CloudTrail } from "../CloudTrail"; +import { CloudTrailClient } from "../CloudTrailClient"; +import { + GetQueryResultsCommand, + GetQueryResultsCommandInput, + GetQueryResultsCommandOutput, +} from "../commands/GetQueryResultsCommand"; +import { CloudTrailPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: CloudTrailClient, + input: GetQueryResultsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new GetQueryResultsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: CloudTrail, + input: GetQueryResultsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.getQueryResults(input, ...args); +}; +export async function* paginateGetQueryResults( + config: CloudTrailPaginationConfiguration, + input: GetQueryResultsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: GetQueryResultsCommandOutput; + while (hasNext) { + input.NextToken = token; + if (config.client instanceof CloudTrail) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof CloudTrailClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected CloudTrail | CloudTrailClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-cloudtrail/src/pagination/ListEventDataStoresPaginator.ts b/clients/client-cloudtrail/src/pagination/ListEventDataStoresPaginator.ts new file mode 100644 index 000000000000..9ddbed107f9c --- /dev/null +++ b/clients/client-cloudtrail/src/pagination/ListEventDataStoresPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { CloudTrail } from "../CloudTrail"; +import { CloudTrailClient } from "../CloudTrailClient"; +import { + ListEventDataStoresCommand, + ListEventDataStoresCommandInput, + ListEventDataStoresCommandOutput, +} from "../commands/ListEventDataStoresCommand"; +import { CloudTrailPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: CloudTrailClient, + input: ListEventDataStoresCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListEventDataStoresCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: CloudTrail, + input: ListEventDataStoresCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listEventDataStores(input, ...args); +}; +export async function* paginateListEventDataStores( + config: CloudTrailPaginationConfiguration, + input: ListEventDataStoresCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | 
undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListEventDataStoresCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof CloudTrail) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof CloudTrailClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected CloudTrail | CloudTrailClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-cloudtrail/src/pagination/ListQueriesPaginator.ts b/clients/client-cloudtrail/src/pagination/ListQueriesPaginator.ts new file mode 100644 index 000000000000..1233929e1d16 --- /dev/null +++ b/clients/client-cloudtrail/src/pagination/ListQueriesPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { CloudTrail } from "../CloudTrail"; +import { CloudTrailClient } from "../CloudTrailClient"; +import { ListQueriesCommand, ListQueriesCommandInput, ListQueriesCommandOutput } from "../commands/ListQueriesCommand"; +import { CloudTrailPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: CloudTrailClient, + input: ListQueriesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListQueriesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: CloudTrail, + input: ListQueriesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listQueries(input, ...args); +}; +export async function* paginateListQueries( + config: CloudTrailPaginationConfiguration, + input: ListQueriesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListQueriesCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof CloudTrail) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof CloudTrailClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected CloudTrail | CloudTrailClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-cloudtrail/src/pagination/index.ts b/clients/client-cloudtrail/src/pagination/index.ts index c31544628652..59857856df89 100644 --- a/clients/client-cloudtrail/src/pagination/index.ts +++ b/clients/client-cloudtrail/src/pagination/index.ts @@ -1,5 +1,8 @@ +export * from "./GetQueryResultsPaginator"; export * from "./Interfaces"; +export * from "./ListEventDataStoresPaginator"; export * from "./ListPublicKeysPaginator"; +export * from "./ListQueriesPaginator"; export * from "./ListTagsPaginator"; export * from "./ListTrailsPaginator"; export * from "./LookupEventsPaginator"; diff --git a/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts b/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts index 9a4b20ab19fd..b719f492fee7 100644 --- a/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts +++ 
b/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts @@ -1,6 +1,8 @@ import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; import { expectBoolean as __expectBoolean, + expectInt32 as __expectInt32, + expectLong as __expectLong, expectNonNull as __expectNonNull, expectNumber as __expectNumber, expectString as __expectString, @@ -16,17 +18,34 @@ import { } from "@aws-sdk/types"; import { AddTagsCommandInput, AddTagsCommandOutput } from "../commands/AddTagsCommand"; +import { CancelQueryCommandInput, CancelQueryCommandOutput } from "../commands/CancelQueryCommand"; +import { + CreateEventDataStoreCommandInput, + CreateEventDataStoreCommandOutput, +} from "../commands/CreateEventDataStoreCommand"; import { CreateTrailCommandInput, CreateTrailCommandOutput } from "../commands/CreateTrailCommand"; +import { + DeleteEventDataStoreCommandInput, + DeleteEventDataStoreCommandOutput, +} from "../commands/DeleteEventDataStoreCommand"; import { DeleteTrailCommandInput, DeleteTrailCommandOutput } from "../commands/DeleteTrailCommand"; +import { DescribeQueryCommandInput, DescribeQueryCommandOutput } from "../commands/DescribeQueryCommand"; import { DescribeTrailsCommandInput, DescribeTrailsCommandOutput } from "../commands/DescribeTrailsCommand"; +import { GetEventDataStoreCommandInput, GetEventDataStoreCommandOutput } from "../commands/GetEventDataStoreCommand"; import { GetEventSelectorsCommandInput, GetEventSelectorsCommandOutput } from "../commands/GetEventSelectorsCommand"; import { GetInsightSelectorsCommandInput, GetInsightSelectorsCommandOutput, } from "../commands/GetInsightSelectorsCommand"; +import { GetQueryResultsCommandInput, GetQueryResultsCommandOutput } from "../commands/GetQueryResultsCommand"; import { GetTrailCommandInput, GetTrailCommandOutput } from "../commands/GetTrailCommand"; import { GetTrailStatusCommandInput, GetTrailStatusCommandOutput } from "../commands/GetTrailStatusCommand"; +import { + ListEventDataStoresCommandInput, + ListEventDataStoresCommandOutput, +} from "../commands/ListEventDataStoresCommand"; import { ListPublicKeysCommandInput, ListPublicKeysCommandOutput } from "../commands/ListPublicKeysCommand"; +import { ListQueriesCommandInput, ListQueriesCommandOutput } from "../commands/ListQueriesCommand"; import { ListTagsCommandInput, ListTagsCommandOutput } from "../commands/ListTagsCommand"; import { ListTrailsCommandInput, ListTrailsCommandOutput } from "../commands/ListTrailsCommand"; import { LookupEventsCommandInput, LookupEventsCommandOutput } from "../commands/LookupEventsCommand"; @@ -36,36 +55,65 @@ import { PutInsightSelectorsCommandOutput, } from "../commands/PutInsightSelectorsCommand"; import { RemoveTagsCommandInput, RemoveTagsCommandOutput } from "../commands/RemoveTagsCommand"; +import { + RestoreEventDataStoreCommandInput, + RestoreEventDataStoreCommandOutput, +} from "../commands/RestoreEventDataStoreCommand"; import { StartLoggingCommandInput, StartLoggingCommandOutput } from "../commands/StartLoggingCommand"; +import { StartQueryCommandInput, StartQueryCommandOutput } from "../commands/StartQueryCommand"; import { StopLoggingCommandInput, StopLoggingCommandOutput } from "../commands/StopLoggingCommand"; +import { + UpdateEventDataStoreCommandInput, + UpdateEventDataStoreCommandOutput, +} from "../commands/UpdateEventDataStoreCommand"; import { UpdateTrailCommandInput, UpdateTrailCommandOutput } from "../commands/UpdateTrailCommand"; import { AddTagsRequest, AddTagsResponse, 
AdvancedEventSelector, AdvancedFieldSelector, + CancelQueryRequest, + CancelQueryResponse, CloudTrailAccessNotEnabledException, CloudTrailARNInvalidException, CloudTrailInvalidClientTokenIdException, CloudWatchLogsDeliveryUnavailableException, ConflictException, + CreateEventDataStoreRequest, + CreateEventDataStoreResponse, CreateTrailRequest, CreateTrailResponse, DataResource, + DeleteEventDataStoreRequest, + DeleteEventDataStoreResponse, DeleteTrailRequest, DeleteTrailResponse, + DescribeQueryRequest, + DescribeQueryResponse, DescribeTrailsRequest, DescribeTrailsResponse, Event, + EventDataStore, + EventDataStoreAlreadyExistsException, + EventDataStoreARNInvalidException, + EventDataStoreMaxLimitExceededException, + EventDataStoreNotFoundException, + EventDataStoreTerminationProtectedException, EventSelector, + GetEventDataStoreRequest, + GetEventDataStoreResponse, GetEventSelectorsRequest, GetEventSelectorsResponse, GetInsightSelectorsRequest, GetInsightSelectorsResponse, + GetQueryResultsRequest, + GetQueryResultsResponse, GetTrailRequest, GetTrailResponse, GetTrailStatusRequest, GetTrailStatusResponse, + InactiveEventDataStoreException, + InactiveQueryException, InsightNotEnabledException, InsightSelector, InsufficientDependencyServiceAccessPermissionException, @@ -74,7 +122,9 @@ import { InsufficientSnsTopicPolicyException, InvalidCloudWatchLogsLogGroupArnException, InvalidCloudWatchLogsRoleArnException, + InvalidDateRangeException, InvalidEventCategoryException, + InvalidEventDataStoreStatusException, InvalidEventSelectorsException, InvalidHomeRegionException, InvalidInsightSelectorsException, @@ -83,6 +133,9 @@ import { InvalidMaxResultsException, InvalidNextTokenException, InvalidParameterCombinationException, + InvalidParameterException, + InvalidQueryStatementException, + InvalidQueryStatusException, InvalidS3BucketNameException, InvalidS3PrefixException, InvalidSnsTopicNameException, @@ -93,8 +146,12 @@ import { KmsException, KmsKeyDisabledException, KmsKeyNotFoundException, + ListEventDataStoresRequest, + ListEventDataStoresResponse, ListPublicKeysRequest, ListPublicKeysResponse, + ListQueriesRequest, + ListQueriesResponse, ListTagsRequest, ListTagsResponse, ListTrailsRequest, @@ -102,6 +159,7 @@ import { LookupAttribute, LookupEventsRequest, LookupEventsResponse, + MaxConcurrentQueriesException, MaximumNumberOfTrailsExceededException, NotOrganizationMasterAccountException, OperationNotPermittedException, @@ -112,15 +170,23 @@ import { PutEventSelectorsResponse, PutInsightSelectorsRequest, PutInsightSelectorsResponse, + Query, + QueryIdNotFoundException, + QueryStatistics, + QueryStatisticsForDescribeQuery, RemoveTagsRequest, RemoveTagsResponse, Resource, ResourceNotFoundException, ResourceTag, ResourceTypeNotSupportedException, + RestoreEventDataStoreRequest, + RestoreEventDataStoreResponse, S3BucketDoesNotExistException, StartLoggingRequest, StartLoggingResponse, + StartQueryRequest, + StartQueryResponse, StopLoggingRequest, StopLoggingResponse, Tag, @@ -131,6 +197,8 @@ import { TrailNotFoundException, TrailNotProvidedException, UnsupportedOperationException, + UpdateEventDataStoreRequest, + UpdateEventDataStoreResponse, UpdateTrailRequest, UpdateTrailResponse, } from "../models/models_0"; @@ -148,6 +216,32 @@ export const serializeAws_json1_1AddTagsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1CancelQueryCommand = async ( + input: CancelQueryCommandInput, + context: __SerdeContext +): 
Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.CancelQuery", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1CancelQueryRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_1CreateEventDataStoreCommand = async ( + input: CreateEventDataStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.CreateEventDataStore", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1CreateEventDataStoreRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1CreateTrailCommand = async ( input: CreateTrailCommandInput, context: __SerdeContext @@ -161,6 +255,19 @@ export const serializeAws_json1_1CreateTrailCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DeleteEventDataStoreCommand = async ( + input: DeleteEventDataStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.DeleteEventDataStore", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeleteEventDataStoreRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DeleteTrailCommand = async ( input: DeleteTrailCommandInput, context: __SerdeContext @@ -174,6 +281,19 @@ export const serializeAws_json1_1DeleteTrailCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DescribeQueryCommand = async ( + input: DescribeQueryCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.DescribeQuery", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DescribeQueryRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeTrailsCommand = async ( input: DescribeTrailsCommandInput, context: __SerdeContext @@ -187,6 +307,19 @@ export const serializeAws_json1_1DescribeTrailsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1GetEventDataStoreCommand = async ( + input: GetEventDataStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.GetEventDataStore", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetEventDataStoreRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1GetEventSelectorsCommand = async ( input: GetEventSelectorsCommandInput, context: __SerdeContext @@ -213,6 +346,19 @@ export const serializeAws_json1_1GetInsightSelectorsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1GetQueryResultsCommand = async ( + input: GetQueryResultsCommandInput, + context: 
__SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.GetQueryResults", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetQueryResultsRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1GetTrailCommand = async ( input: GetTrailCommandInput, context: __SerdeContext @@ -239,6 +385,19 @@ export const serializeAws_json1_1GetTrailStatusCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ListEventDataStoresCommand = async ( + input: ListEventDataStoresCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.ListEventDataStores", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1ListEventDataStoresRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListPublicKeysCommand = async ( input: ListPublicKeysCommandInput, context: __SerdeContext @@ -252,6 +411,19 @@ export const serializeAws_json1_1ListPublicKeysCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ListQueriesCommand = async ( + input: ListQueriesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.ListQueries", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1ListQueriesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListTagsCommand = async ( input: ListTagsCommandInput, context: __SerdeContext @@ -330,6 +502,19 @@ export const serializeAws_json1_1RemoveTagsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1RestoreEventDataStoreCommand = async ( + input: RestoreEventDataStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.RestoreEventDataStore", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1RestoreEventDataStoreRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1StartLoggingCommand = async ( input: StartLoggingCommandInput, context: __SerdeContext @@ -343,6 +528,19 @@ export const serializeAws_json1_1StartLoggingCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1StartQueryCommand = async ( + input: StartQueryCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.StartQuery", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1StartQueryRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1StopLoggingCommand = async ( input: StopLoggingCommandInput, context: __SerdeContext @@ -356,6 +554,19 @@ export const 
serializeAws_json1_1StopLoggingCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1UpdateEventDataStoreCommand = async ( + input: UpdateEventDataStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "CloudTrail_20131101.UpdateEventDataStore", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1UpdateEventDataStoreRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1UpdateTrailCommand = async ( input: UpdateTrailCommandInput, context: __SerdeContext @@ -406,6 +617,30 @@ const deserializeAws_json1_1AddTagsCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ConflictException": + case "com.amazonaws.cloudtrail#ConflictException": + response = { + ...(await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": + response = { + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": + response = { + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InvalidTagParameterException": case "com.amazonaws.cloudtrail#InvalidTagParameterException": response = { @@ -487,27 +722,27 @@ const deserializeAws_json1_1AddTagsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1CreateTrailCommand = async ( +export const deserializeAws_json1_1CancelQueryCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1CreateTrailCommandError(output, context); + return deserializeAws_json1_1CancelQueryCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1CreateTrailResponse(data, context); - const response: CreateTrailCommandOutput = { + contents = deserializeAws_json1_1CancelQueryResponse(data, context); + const response: CancelQueryCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1CreateTrailCommandError = async ( +const deserializeAws_json1_1CancelQueryCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -516,154 +751,425 @@ const deserializeAws_json1_1CreateTrailCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "CloudTrailAccessNotEnabledException": - case "com.amazonaws.cloudtrail#CloudTrailAccessNotEnabledException": + case "ConflictException": + case "com.amazonaws.cloudtrail#ConflictException": response = { - ...(await 
deserializeAws_json1_1CloudTrailAccessNotEnabledExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "CloudTrailInvalidClientTokenIdException": - case "com.amazonaws.cloudtrail#CloudTrailInvalidClientTokenIdException": + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": response = { - ...(await deserializeAws_json1_1CloudTrailInvalidClientTokenIdExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "CloudWatchLogsDeliveryUnavailableException": - case "com.amazonaws.cloudtrail#CloudWatchLogsDeliveryUnavailableException": + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": response = { - ...(await deserializeAws_json1_1CloudWatchLogsDeliveryUnavailableExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InsufficientDependencyServiceAccessPermissionException": - case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": response = { - ...(await deserializeAws_json1_1InsufficientDependencyServiceAccessPermissionExceptionResponse( - parsedOutput, - context - )), + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InsufficientEncryptionPolicyException": - case "com.amazonaws.cloudtrail#InsufficientEncryptionPolicyException": + case "InactiveQueryException": + case "com.amazonaws.cloudtrail#InactiveQueryException": response = { - ...(await deserializeAws_json1_1InsufficientEncryptionPolicyExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InactiveQueryExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InsufficientS3BucketPolicyException": - case "com.amazonaws.cloudtrail#InsufficientS3BucketPolicyException": + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": response = { - ...(await deserializeAws_json1_1InsufficientS3BucketPolicyExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InsufficientSnsTopicPolicyException": - case "com.amazonaws.cloudtrail#InsufficientSnsTopicPolicyException": + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { - ...(await deserializeAws_json1_1InsufficientSnsTopicPolicyExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidCloudWatchLogsLogGroupArnException": - case "com.amazonaws.cloudtrail#InvalidCloudWatchLogsLogGroupArnException": + case "QueryIdNotFoundException": + case 
"com.amazonaws.cloudtrail#QueryIdNotFoundException": response = { - ...(await deserializeAws_json1_1InvalidCloudWatchLogsLogGroupArnExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1QueryIdNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidCloudWatchLogsRoleArnException": - case "com.amazonaws.cloudtrail#InvalidCloudWatchLogsRoleArnException": + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": response = { - ...(await deserializeAws_json1_1InvalidCloudWatchLogsRoleArnExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidKmsKeyIdException": - case "com.amazonaws.cloudtrail#InvalidKmsKeyIdException": + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; response = { - ...(await deserializeAws_json1_1InvalidKmsKeyIdExceptionResponse(parsedOutput, context)), + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1CreateEventDataStoreCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1CreateEventDataStoreCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1CreateEventDataStoreResponse(data, context); + const response: CreateEventDataStoreCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1CreateEventDataStoreCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "CloudTrailAccessNotEnabledException": + case "com.amazonaws.cloudtrail#CloudTrailAccessNotEnabledException": + response = { + ...(await deserializeAws_json1_1CloudTrailAccessNotEnabledExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidParameterCombinationException": - case "com.amazonaws.cloudtrail#InvalidParameterCombinationException": + case "ConflictException": + case "com.amazonaws.cloudtrail#ConflictException": response = { - ...(await deserializeAws_json1_1InvalidParameterCombinationExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidS3BucketNameException": - case "com.amazonaws.cloudtrail#InvalidS3BucketNameException": + case "EventDataStoreAlreadyExistsException": + case 
"com.amazonaws.cloudtrail#EventDataStoreAlreadyExistsException": response = { - ...(await deserializeAws_json1_1InvalidS3BucketNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreAlreadyExistsExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidS3PrefixException": - case "com.amazonaws.cloudtrail#InvalidS3PrefixException": + case "EventDataStoreMaxLimitExceededException": + case "com.amazonaws.cloudtrail#EventDataStoreMaxLimitExceededException": response = { - ...(await deserializeAws_json1_1InvalidS3PrefixExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreMaxLimitExceededExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidSnsTopicNameException": - case "com.amazonaws.cloudtrail#InvalidSnsTopicNameException": + case "InsufficientDependencyServiceAccessPermissionException": + case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": response = { - ...(await deserializeAws_json1_1InvalidSnsTopicNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InsufficientDependencyServiceAccessPermissionExceptionResponse( + parsedOutput, + context + )), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTagParameterException": - case "com.amazonaws.cloudtrail#InvalidTagParameterException": + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": response = { - ...(await deserializeAws_json1_1InvalidTagParameterExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": + case "InvalidTagParameterException": + case "com.amazonaws.cloudtrail#InvalidTagParameterException": response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidTagParameterExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "KmsException": - case "com.amazonaws.cloudtrail#KmsException": + case "NotOrganizationMasterAccountException": + case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": response = { - ...(await deserializeAws_json1_1KmsExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "KmsKeyDisabledException": - case "com.amazonaws.cloudtrail#KmsKeyDisabledException": + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { - ...(await deserializeAws_json1_1KmsKeyDisabledExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "KmsKeyNotFoundException": + case "OrganizationNotInAllFeaturesModeException": + case "com.amazonaws.cloudtrail#OrganizationNotInAllFeaturesModeException": + response = { + ...(await deserializeAws_json1_1OrganizationNotInAllFeaturesModeExceptionResponse(parsedOutput, 
context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OrganizationsNotInUseException": + case "com.amazonaws.cloudtrail#OrganizationsNotInUseException": + response = { + ...(await deserializeAws_json1_1OrganizationsNotInUseExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1CreateTrailCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1CreateTrailCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1CreateTrailResponse(data, context); + const response: CreateTrailCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1CreateTrailCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "CloudTrailAccessNotEnabledException": + case "com.amazonaws.cloudtrail#CloudTrailAccessNotEnabledException": + response = { + ...(await deserializeAws_json1_1CloudTrailAccessNotEnabledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "CloudTrailInvalidClientTokenIdException": + case "com.amazonaws.cloudtrail#CloudTrailInvalidClientTokenIdException": + response = { + ...(await deserializeAws_json1_1CloudTrailInvalidClientTokenIdExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "CloudWatchLogsDeliveryUnavailableException": + case "com.amazonaws.cloudtrail#CloudWatchLogsDeliveryUnavailableException": + response = { + ...(await deserializeAws_json1_1CloudWatchLogsDeliveryUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.cloudtrail#ConflictException": + response = { + ...(await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InsufficientDependencyServiceAccessPermissionException": + case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": + response = 
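The CreateEventDataStore error deserializer above attaches the modeled error code as the rejected error's name, so callers can branch on it directly. A hedged sketch of that usage follows; the input values are placeholders, and the error names mirror the switch cases above.

import { CloudTrailClient, CreateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

async function createStore(name: string): Promise<string | undefined> {
  try {
    const output = await client.send(
      new CreateEventDataStoreCommand({
        Name: name,
        RetentionPeriod: 365,              // days to retain events (illustrative)
        TerminationProtectionEnabled: true,
        MultiRegionEnabled: false,
      })
    );
    return output.EventDataStoreArn;
  } catch (err: any) {
    // The deserializer above sets err.name to the modeled error code.
    if (err.name === "EventDataStoreAlreadyExistsException") {
      console.warn(`An event data store named ${name} already exists.`);
      return undefined;
    }
    if (err.name === "EventDataStoreMaxLimitExceededException") {
      console.warn("Account limit for event data stores reached.");
      return undefined;
    }
    throw err;
  }
}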
{ + ...(await deserializeAws_json1_1InsufficientDependencyServiceAccessPermissionExceptionResponse( + parsedOutput, + context + )), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InsufficientEncryptionPolicyException": + case "com.amazonaws.cloudtrail#InsufficientEncryptionPolicyException": + response = { + ...(await deserializeAws_json1_1InsufficientEncryptionPolicyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InsufficientS3BucketPolicyException": + case "com.amazonaws.cloudtrail#InsufficientS3BucketPolicyException": + response = { + ...(await deserializeAws_json1_1InsufficientS3BucketPolicyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InsufficientSnsTopicPolicyException": + case "com.amazonaws.cloudtrail#InsufficientSnsTopicPolicyException": + response = { + ...(await deserializeAws_json1_1InsufficientSnsTopicPolicyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidCloudWatchLogsLogGroupArnException": + case "com.amazonaws.cloudtrail#InvalidCloudWatchLogsLogGroupArnException": + response = { + ...(await deserializeAws_json1_1InvalidCloudWatchLogsLogGroupArnExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidCloudWatchLogsRoleArnException": + case "com.amazonaws.cloudtrail#InvalidCloudWatchLogsRoleArnException": + response = { + ...(await deserializeAws_json1_1InvalidCloudWatchLogsRoleArnExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidKmsKeyIdException": + case "com.amazonaws.cloudtrail#InvalidKmsKeyIdException": + response = { + ...(await deserializeAws_json1_1InvalidKmsKeyIdExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidParameterCombinationException": + case "com.amazonaws.cloudtrail#InvalidParameterCombinationException": + response = { + ...(await deserializeAws_json1_1InvalidParameterCombinationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidS3BucketNameException": + case "com.amazonaws.cloudtrail#InvalidS3BucketNameException": + response = { + ...(await deserializeAws_json1_1InvalidS3BucketNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidS3PrefixException": + case "com.amazonaws.cloudtrail#InvalidS3PrefixException": + response = { + ...(await deserializeAws_json1_1InvalidS3PrefixExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidSnsTopicNameException": + case "com.amazonaws.cloudtrail#InvalidSnsTopicNameException": + response = { + ...(await deserializeAws_json1_1InvalidSnsTopicNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidTagParameterException": + case "com.amazonaws.cloudtrail#InvalidTagParameterException": + response = { + ...(await deserializeAws_json1_1InvalidTagParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case 
"InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": + response = { + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "KmsException": + case "com.amazonaws.cloudtrail#KmsException": + response = { + ...(await deserializeAws_json1_1KmsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "KmsKeyDisabledException": + case "com.amazonaws.cloudtrail#KmsKeyDisabledException": + response = { + ...(await deserializeAws_json1_1KmsKeyDisabledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "KmsKeyNotFoundException": case "com.amazonaws.cloudtrail#KmsKeyNotFoundException": response = { ...(await deserializeAws_json1_1KmsKeyNotFoundExceptionResponse(parsedOutput, context)), @@ -674,63 +1180,829 @@ const deserializeAws_json1_1CreateTrailCommandError = async ( case "MaximumNumberOfTrailsExceededException": case "com.amazonaws.cloudtrail#MaximumNumberOfTrailsExceededException": response = { - ...(await deserializeAws_json1_1MaximumNumberOfTrailsExceededExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1MaximumNumberOfTrailsExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "NotOrganizationMasterAccountException": + case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": + response = { + ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OrganizationNotInAllFeaturesModeException": + case "com.amazonaws.cloudtrail#OrganizationNotInAllFeaturesModeException": + response = { + ...(await deserializeAws_json1_1OrganizationNotInAllFeaturesModeExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OrganizationsNotInUseException": + case "com.amazonaws.cloudtrail#OrganizationsNotInUseException": + response = { + ...(await deserializeAws_json1_1OrganizationsNotInUseExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "S3BucketDoesNotExistException": + case "com.amazonaws.cloudtrail#S3BucketDoesNotExistException": + response = { + ...(await deserializeAws_json1_1S3BucketDoesNotExistExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TrailAlreadyExistsException": + case "com.amazonaws.cloudtrail#TrailAlreadyExistsException": + response = { + ...(await deserializeAws_json1_1TrailAlreadyExistsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TrailNotProvidedException": + case "com.amazonaws.cloudtrail#TrailNotProvidedException": + response = { + ...(await deserializeAws_json1_1TrailNotProvidedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1DeleteEventDataStoreCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeleteEventDataStoreCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DeleteEventDataStoreResponse(data, context); + const response: DeleteEventDataStoreCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DeleteEventDataStoreCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": + response = { + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": + response = { + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreTerminationProtectedException": + case "com.amazonaws.cloudtrail#EventDataStoreTerminationProtectedException": + response = { + ...(await deserializeAws_json1_1EventDataStoreTerminationProtectedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InsufficientDependencyServiceAccessPermissionException": + case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": + response = { + ...(await deserializeAws_json1_1InsufficientDependencyServiceAccessPermissionExceptionResponse( + parsedOutput, + context + )), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "NotOrganizationMasterAccountException": + case 
"com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": + response = { + ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1DeleteTrailCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeleteTrailCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DeleteTrailResponse(data, context); + const response: DeleteTrailCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DeleteTrailCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.cloudtrail#ConflictException": + response = { + ...(await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InsufficientDependencyServiceAccessPermissionException": + case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": + response = { + ...(await deserializeAws_json1_1InsufficientDependencyServiceAccessPermissionExceptionResponse( + parsedOutput, + context + )), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidHomeRegionException": + case "com.amazonaws.cloudtrail#InvalidHomeRegionException": + response = { + ...(await deserializeAws_json1_1InvalidHomeRegionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": + response = { + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case 
"NotOrganizationMasterAccountException": + case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": + response = { + ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TrailNotFoundException": + case "com.amazonaws.cloudtrail#TrailNotFoundException": + response = { + ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1DescribeQueryCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DescribeQueryCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DescribeQueryResponse(data, context); + const response: DescribeQueryCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DescribeQueryCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": + response = { + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": + response = { + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": + response = { + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "QueryIdNotFoundException": + case "com.amazonaws.cloudtrail#QueryIdNotFoundException": + response = { + ...(await deserializeAws_json1_1QueryIdNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1DescribeTrailsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DescribeTrailsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DescribeTrailsResponse(data, context); + const response: DescribeTrailsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DescribeTrailsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": + response = { + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; 
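DescribeQuery, deserialized above, reports the state of a CloudTrail Lake query. A minimal polling sketch is shown below; the input shape (EventDataStore plus QueryId) and the status strings are assumptions based on the CloudTrail Lake API at the time of this change, and the poll interval is arbitrary.

import { CloudTrailClient, DescribeQueryCommand } from "@aws-sdk/client-cloudtrail";

const client = new CloudTrailClient({});

// Polls DescribeQuery until the query leaves the QUEUED/RUNNING states.
async function waitForQuery(eventDataStoreArn: string, queryId: string): Promise<string> {
  for (;;) {
    const { QueryStatus } = await client.send(
      new DescribeQueryCommand({ EventDataStore: eventDataStoreArn, QueryId: queryId })
    );
    if (QueryStatus && QueryStatus !== "QUEUED" && QueryStatus !== "RUNNING") {
      return QueryStatus; // e.g. FINISHED, FAILED, CANCELLED
    }
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
}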
+ break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1GetEventDataStoreCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetEventDataStoreCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetEventDataStoreResponse(data, context); + const response: GetEventDataStoreCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetEventDataStoreCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": + response = { + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": + response = { + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const 
deserializeAws_json1_1GetEventSelectorsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetEventSelectorsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetEventSelectorsResponse(data, context); + const response: GetEventSelectorsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetEventSelectorsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": + response = { + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TrailNotFoundException": + case "com.amazonaws.cloudtrail#TrailNotFoundException": + response = { + ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1GetInsightSelectorsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetInsightSelectorsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetInsightSelectorsResponse(data, context); + const response: GetInsightSelectorsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetInsightSelectorsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let 
errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InsightNotEnabledException": + case "com.amazonaws.cloudtrail#InsightNotEnabledException": + response = { + ...(await deserializeAws_json1_1InsightNotEnabledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": + response = { + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TrailNotFoundException": + case "com.amazonaws.cloudtrail#TrailNotFoundException": + response = { + ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1GetQueryResultsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetQueryResultsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetQueryResultsResponse(data, context); + const response: GetQueryResultsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetQueryResultsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": + response = { + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "NotOrganizationMasterAccountException": - case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": + case "EventDataStoreNotFoundException": + case 
"com.amazonaws.cloudtrail#EventDataStoreNotFoundException": response = { - ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "OperationNotPermittedException": - case "com.amazonaws.cloudtrail#OperationNotPermittedException": + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": response = { - ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "OrganizationNotInAllFeaturesModeException": - case "com.amazonaws.cloudtrail#OrganizationNotInAllFeaturesModeException": + case "InvalidMaxResultsException": + case "com.amazonaws.cloudtrail#InvalidMaxResultsException": response = { - ...(await deserializeAws_json1_1OrganizationNotInAllFeaturesModeExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidMaxResultsExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "OrganizationsNotInUseException": - case "com.amazonaws.cloudtrail#OrganizationsNotInUseException": + case "InvalidNextTokenException": + case "com.amazonaws.cloudtrail#InvalidNextTokenException": response = { - ...(await deserializeAws_json1_1OrganizationsNotInUseExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidNextTokenExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "S3BucketDoesNotExistException": - case "com.amazonaws.cloudtrail#S3BucketDoesNotExistException": + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": response = { - ...(await deserializeAws_json1_1S3BucketDoesNotExistExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "TrailAlreadyExistsException": - case "com.amazonaws.cloudtrail#TrailAlreadyExistsException": + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { - ...(await deserializeAws_json1_1TrailAlreadyExistsExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "TrailNotProvidedException": - case "com.amazonaws.cloudtrail#TrailNotProvidedException": + case "QueryIdNotFoundException": + case "com.amazonaws.cloudtrail#QueryIdNotFoundException": response = { - ...(await deserializeAws_json1_1TrailNotProvidedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1QueryIdNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -760,27 +2032,27 @@ const deserializeAws_json1_1CreateTrailCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DeleteTrailCommand = async ( +export const deserializeAws_json1_1GetTrailCommand = async ( output: __HttpResponse, context: 
__SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DeleteTrailCommandError(output, context); + return deserializeAws_json1_1GetTrailCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DeleteTrailResponse(data, context); - const response: DeleteTrailCommandOutput = { + contents = deserializeAws_json1_1GetTrailResponse(data, context); + const response: GetTrailCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DeleteTrailCommandError = async ( +const deserializeAws_json1_1GetTrailCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -789,45 +2061,88 @@ const deserializeAws_json1_1DeleteTrailCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "ConflictException": - case "com.amazonaws.cloudtrail#ConflictException": + case "InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": response = { - ...(await deserializeAws_json1_1ConflictExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InsufficientDependencyServiceAccessPermissionException": - case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { - ...(await deserializeAws_json1_1InsufficientDependencyServiceAccessPermissionExceptionResponse( - parsedOutput, - context - )), + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidHomeRegionException": - case "com.amazonaws.cloudtrail#InvalidHomeRegionException": + case "TrailNotFoundException": + case "com.amazonaws.cloudtrail#TrailNotFoundException": response = { - ...(await deserializeAws_json1_1InvalidHomeRegionExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": + case "UnsupportedOperationException": + case "com.amazonaws.cloudtrail#UnsupportedOperationException": response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1UnsupportedOperationExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "NotOrganizationMasterAccountException": - case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; response = { - ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: 
deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1GetTrailStatusCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetTrailStatusCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetTrailStatusResponse(data, context); + const response: GetTrailStatusCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetTrailStatusCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": + response = { + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -873,27 +2188,27 @@ const deserializeAws_json1_1DeleteTrailCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DescribeTrailsCommand = async ( +export const deserializeAws_json1_1ListEventDataStoresCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DescribeTrailsCommandError(output, context); + return deserializeAws_json1_1ListEventDataStoresCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DescribeTrailsResponse(data, context); - const response: DescribeTrailsCommandOutput = { + contents = deserializeAws_json1_1ListEventDataStoresResponse(data, context); + const response: ListEventDataStoresCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DescribeTrailsCommandError = async ( +const deserializeAws_json1_1ListEventDataStoresCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -902,10 +2217,18 @@ const deserializeAws_json1_1DescribeTrailsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": + case "InvalidMaxResultsException": + case "com.amazonaws.cloudtrail#InvalidMaxResultsException": response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidMaxResultsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidNextTokenException": + case 
"com.amazonaws.cloudtrail#InvalidNextTokenException": + response = { + ...(await deserializeAws_json1_1InvalidNextTokenExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -943,27 +2266,27 @@ const deserializeAws_json1_1DescribeTrailsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1GetEventSelectorsCommand = async ( +export const deserializeAws_json1_1ListPublicKeysCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1GetEventSelectorsCommandError(output, context); + return deserializeAws_json1_1ListPublicKeysCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1GetEventSelectorsResponse(data, context); - const response: GetEventSelectorsCommandOutput = { + contents = deserializeAws_json1_1ListPublicKeysResponse(data, context); + const response: ListPublicKeysCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1GetEventSelectorsCommandError = async ( +const deserializeAws_json1_1ListPublicKeysCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -972,26 +2295,26 @@ const deserializeAws_json1_1GetEventSelectorsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": + case "InvalidTimeRangeException": + case "com.amazonaws.cloudtrail#InvalidTimeRangeException": response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidTimeRangeExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "OperationNotPermittedException": - case "com.amazonaws.cloudtrail#OperationNotPermittedException": + case "InvalidTokenException": + case "com.amazonaws.cloudtrail#InvalidTokenException": response = { - ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidTokenExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "TrailNotFoundException": - case "com.amazonaws.cloudtrail#TrailNotFoundException": + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { - ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1021,27 +2344,27 @@ const deserializeAws_json1_1GetEventSelectorsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1GetInsightSelectorsCommand = async ( +export const deserializeAws_json1_1ListQueriesCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return 
deserializeAws_json1_1GetInsightSelectorsCommandError(output, context); + return deserializeAws_json1_1ListQueriesCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1GetInsightSelectorsResponse(data, context); - const response: GetInsightSelectorsCommandOutput = { + contents = deserializeAws_json1_1ListQueriesResponse(data, context); + const response: ListQueriesCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1GetInsightSelectorsCommandError = async ( +const deserializeAws_json1_1ListQueriesCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1050,34 +2373,74 @@ const deserializeAws_json1_1GetInsightSelectorsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "InsightNotEnabledException": - case "com.amazonaws.cloudtrail#InsightNotEnabledException": + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": response = { - ...(await deserializeAws_json1_1InsightNotEnabledExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "OperationNotPermittedException": - case "com.amazonaws.cloudtrail#OperationNotPermittedException": + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": response = { - ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "TrailNotFoundException": - case "com.amazonaws.cloudtrail#TrailNotFoundException": + case "InvalidDateRangeException": + case "com.amazonaws.cloudtrail#InvalidDateRangeException": response = { - ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidDateRangeExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidMaxResultsException": + case "com.amazonaws.cloudtrail#InvalidMaxResultsException": + response = { + ...(await deserializeAws_json1_1InvalidMaxResultsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidNextTokenException": + case "com.amazonaws.cloudtrail#InvalidNextTokenException": + response = { + ...(await deserializeAws_json1_1InvalidNextTokenExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + 
break; + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidQueryStatusException": + case "com.amazonaws.cloudtrail#InvalidQueryStatusException": + response = { + ...(await deserializeAws_json1_1InvalidQueryStatusExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": + response = { + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1107,27 +2470,27 @@ const deserializeAws_json1_1GetInsightSelectorsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1GetTrailCommand = async ( +export const deserializeAws_json1_1ListTagsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1GetTrailCommandError(output, context); + return deserializeAws_json1_1ListTagsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1GetTrailResponse(data, context); - const response: GetTrailCommandOutput = { + contents = deserializeAws_json1_1ListTagsResponse(data, context); + const response: ListTagsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1GetTrailCommandError = async ( +const deserializeAws_json1_1ListTagsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1136,6 +2499,38 @@ const deserializeAws_json1_1GetTrailCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "CloudTrailARNInvalidException": + case "com.amazonaws.cloudtrail#CloudTrailARNInvalidException": + response = { + ...(await deserializeAws_json1_1CloudTrailARNInvalidExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": + response = { + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": + response = { + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidTokenException": + case "com.amazonaws.cloudtrail#InvalidTokenException": + response = { + ...(await deserializeAws_json1_1InvalidTokenExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InvalidTrailNameException": case "com.amazonaws.cloudtrail#InvalidTrailNameException": response = { @@ -1152,10 
+2547,18 @@ const deserializeAws_json1_1GetTrailCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "TrailNotFoundException": - case "com.amazonaws.cloudtrail#TrailNotFoundException": + case "ResourceNotFoundException": + case "com.amazonaws.cloudtrail#ResourceNotFoundException": response = { - ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceTypeNotSupportedException": + case "com.amazonaws.cloudtrail#ResourceTypeNotSupportedException": + response = { + ...(await deserializeAws_json1_1ResourceTypeNotSupportedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1185,27 +2588,27 @@ const deserializeAws_json1_1GetTrailCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1GetTrailStatusCommand = async ( +export const deserializeAws_json1_1ListTrailsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1GetTrailStatusCommandError(output, context); + return deserializeAws_json1_1ListTrailsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1GetTrailStatusResponse(data, context); - const response: GetTrailStatusCommandOutput = { + contents = deserializeAws_json1_1ListTrailsResponse(data, context); + const response: ListTrailsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1GetTrailStatusCommandError = async ( +const deserializeAws_json1_1ListTrailsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1214,14 +2617,6 @@ const deserializeAws_json1_1GetTrailStatusCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": - response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "OperationNotPermittedException": case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { @@ -1230,14 +2625,6 @@ const deserializeAws_json1_1GetTrailStatusCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "TrailNotFoundException": - case "com.amazonaws.cloudtrail#TrailNotFoundException": - response = { - ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "UnsupportedOperationException": case "com.amazonaws.cloudtrail#UnsupportedOperationException": response = { @@ -1263,27 +2650,27 @@ const deserializeAws_json1_1GetTrailStatusCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1ListPublicKeysCommand = async ( +export const deserializeAws_json1_1LookupEventsCommand = async ( output: 
__HttpResponse, context: __SerdeContext -): Promise<ListPublicKeysCommandOutput> => { +): Promise<LookupEventsCommandOutput> => { if (output.statusCode >= 300) { - return deserializeAws_json1_1ListPublicKeysCommandError(output, context); + return deserializeAws_json1_1LookupEventsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1ListPublicKeysResponse(data, context); - const response: ListPublicKeysCommandOutput = { + contents = deserializeAws_json1_1LookupEventsResponse(data, context); + const response: LookupEventsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1ListPublicKeysCommandError = async ( +const deserializeAws_json1_1LookupEventsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise<ListPublicKeysCommandOutput> => { +): Promise<LookupEventsCommandOutput> => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1292,18 +2679,42 @@ const deserializeAws_json1_1ListPublicKeysCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "InvalidTimeRangeException": - case "com.amazonaws.cloudtrail#InvalidTimeRangeException": + case "InvalidEventCategoryException": + case "com.amazonaws.cloudtrail#InvalidEventCategoryException": response = { - ...(await deserializeAws_json1_1InvalidTimeRangeExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidEventCategoryExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTokenException": - case "com.amazonaws.cloudtrail#InvalidTokenException": + case "InvalidLookupAttributesException": + case "com.amazonaws.cloudtrail#InvalidLookupAttributesException": response = { - ...(await deserializeAws_json1_1InvalidTokenExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidLookupAttributesExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidMaxResultsException": + case "com.amazonaws.cloudtrail#InvalidMaxResultsException": + response = { + ...(await deserializeAws_json1_1InvalidMaxResultsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidNextTokenException": + case "com.amazonaws.cloudtrail#InvalidNextTokenException": + response = { + ...(await deserializeAws_json1_1InvalidNextTokenExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidTimeRangeException": + case "com.amazonaws.cloudtrail#InvalidTimeRangeException": + response = { + ...(await deserializeAws_json1_1InvalidTimeRangeExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1341,27 +2752,27 @@ const deserializeAws_json1_1ListPublicKeysCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1ListTagsCommand = async ( +export const deserializeAws_json1_1PutEventSelectorsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise<ListTagsCommandOutput> => { +): Promise<PutEventSelectorsCommandOutput> => { if (output.statusCode >= 300) { - return deserializeAws_json1_1ListTagsCommandError(output, context); + return deserializeAws_json1_1PutEventSelectorsCommandError(output, context); } const data: any = await
parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1ListTagsResponse(data, context); - const response: ListTagsCommandOutput = { + contents = deserializeAws_json1_1PutEventSelectorsResponse(data, context); + const response: PutEventSelectorsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1ListTagsCommandError = async ( +const deserializeAws_json1_1PutEventSelectorsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1370,18 +2781,29 @@ const deserializeAws_json1_1ListTagsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "CloudTrailARNInvalidException": - case "com.amazonaws.cloudtrail#CloudTrailARNInvalidException": + case "InsufficientDependencyServiceAccessPermissionException": + case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": response = { - ...(await deserializeAws_json1_1CloudTrailARNInvalidExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InsufficientDependencyServiceAccessPermissionExceptionResponse( + parsedOutput, + context + )), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTokenException": - case "com.amazonaws.cloudtrail#InvalidTokenException": + case "InvalidEventSelectorsException": + case "com.amazonaws.cloudtrail#InvalidEventSelectorsException": response = { - ...(await deserializeAws_json1_1InvalidTokenExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidEventSelectorsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidHomeRegionException": + case "com.amazonaws.cloudtrail#InvalidHomeRegionException": + response = { + ...(await deserializeAws_json1_1InvalidHomeRegionExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1394,26 +2816,26 @@ const deserializeAws_json1_1ListTagsCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "OperationNotPermittedException": - case "com.amazonaws.cloudtrail#OperationNotPermittedException": + case "NotOrganizationMasterAccountException": + case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": response = { - ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "ResourceNotFoundException": - case "com.amazonaws.cloudtrail#ResourceNotFoundException": + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { - ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "ResourceTypeNotSupportedException": - case "com.amazonaws.cloudtrail#ResourceTypeNotSupportedException": + case "TrailNotFoundException": + case "com.amazonaws.cloudtrail#TrailNotFoundException": response = { - ...(await 
deserializeAws_json1_1ResourceTypeNotSupportedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1443,27 +2865,27 @@ const deserializeAws_json1_1ListTagsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1ListTrailsCommand = async ( +export const deserializeAws_json1_1PutInsightSelectorsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1ListTrailsCommandError(output, context); + return deserializeAws_json1_1PutInsightSelectorsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1ListTrailsResponse(data, context); - const response: ListTrailsCommandOutput = { + contents = deserializeAws_json1_1PutInsightSelectorsResponse(data, context); + const response: PutInsightSelectorsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1ListTrailsCommandError = async ( +const deserializeAws_json1_1PutInsightSelectorsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1472,10 +2894,82 @@ const deserializeAws_json1_1ListTrailsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "InsufficientEncryptionPolicyException": + case "com.amazonaws.cloudtrail#InsufficientEncryptionPolicyException": + response = { + ...(await deserializeAws_json1_1InsufficientEncryptionPolicyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InsufficientS3BucketPolicyException": + case "com.amazonaws.cloudtrail#InsufficientS3BucketPolicyException": + response = { + ...(await deserializeAws_json1_1InsufficientS3BucketPolicyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidHomeRegionException": + case "com.amazonaws.cloudtrail#InvalidHomeRegionException": + response = { + ...(await deserializeAws_json1_1InvalidHomeRegionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInsightSelectorsException": + case "com.amazonaws.cloudtrail#InvalidInsightSelectorsException": + response = { + ...(await deserializeAws_json1_1InvalidInsightSelectorsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": + response = { + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "KmsException": + case "com.amazonaws.cloudtrail#KmsException": + response = { + ...(await deserializeAws_json1_1KmsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "NotOrganizationMasterAccountException": + case 
"com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": + response = { + ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "OperationNotPermittedException": case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { - ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "S3BucketDoesNotExistException": + case "com.amazonaws.cloudtrail#S3BucketDoesNotExistException": + response = { + ...(await deserializeAws_json1_1S3BucketDoesNotExistExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TrailNotFoundException": + case "com.amazonaws.cloudtrail#TrailNotFoundException": + response = { + ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1505,27 +2999,27 @@ const deserializeAws_json1_1ListTrailsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1LookupEventsCommand = async ( +export const deserializeAws_json1_1RemoveTagsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1LookupEventsCommandError(output, context); + return deserializeAws_json1_1RemoveTagsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1LookupEventsResponse(data, context); - const response: LookupEventsCommandOutput = { + contents = deserializeAws_json1_1RemoveTagsResponse(data, context); + const response: RemoveTagsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1LookupEventsCommandError = async ( +const deserializeAws_json1_1RemoveTagsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1534,42 +3028,50 @@ const deserializeAws_json1_1LookupEventsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "InvalidEventCategoryException": - case "com.amazonaws.cloudtrail#InvalidEventCategoryException": + case "CloudTrailARNInvalidException": + case "com.amazonaws.cloudtrail#CloudTrailARNInvalidException": response = { - ...(await deserializeAws_json1_1InvalidEventCategoryExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1CloudTrailARNInvalidExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidLookupAttributesException": - case "com.amazonaws.cloudtrail#InvalidLookupAttributesException": + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": response = { - ...(await deserializeAws_json1_1InvalidLookupAttributesExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, 
context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidMaxResultsException": - case "com.amazonaws.cloudtrail#InvalidMaxResultsException": + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": response = { - ...(await deserializeAws_json1_1InvalidMaxResultsExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidNextTokenException": - case "com.amazonaws.cloudtrail#InvalidNextTokenException": + case "InvalidTagParameterException": + case "com.amazonaws.cloudtrail#InvalidTagParameterException": response = { - ...(await deserializeAws_json1_1InvalidNextTokenExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidTagParameterExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTimeRangeException": - case "com.amazonaws.cloudtrail#InvalidTimeRangeException": + case "InvalidTrailNameException": + case "com.amazonaws.cloudtrail#InvalidTrailNameException": response = { - ...(await deserializeAws_json1_1InvalidTimeRangeExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "NotOrganizationMasterAccountException": + case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": + response = { + ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1582,6 +3084,22 @@ const deserializeAws_json1_1LookupEventsCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceNotFoundException": + case "com.amazonaws.cloudtrail#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceTypeNotSupportedException": + case "com.amazonaws.cloudtrail#ResourceTypeNotSupportedException": + response = { + ...(await deserializeAws_json1_1ResourceTypeNotSupportedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "UnsupportedOperationException": case "com.amazonaws.cloudtrail#UnsupportedOperationException": response = { @@ -1607,27 +3125,27 @@ const deserializeAws_json1_1LookupEventsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1PutEventSelectorsCommand = async ( +export const deserializeAws_json1_1RestoreEventDataStoreCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1PutEventSelectorsCommandError(output, context); + return deserializeAws_json1_1RestoreEventDataStoreCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1PutEventSelectorsResponse(data, context); - const response: PutEventSelectorsCommandOutput = { + contents = deserializeAws_json1_1RestoreEventDataStoreResponse(data, context); + const response: 
RestoreEventDataStoreCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1PutEventSelectorsCommandError = async ( +const deserializeAws_json1_1RestoreEventDataStoreCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1636,6 +3154,38 @@ const deserializeAws_json1_1PutEventSelectorsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "CloudTrailAccessNotEnabledException": + case "com.amazonaws.cloudtrail#CloudTrailAccessNotEnabledException": + response = { + ...(await deserializeAws_json1_1CloudTrailAccessNotEnabledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": + response = { + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreMaxLimitExceededException": + case "com.amazonaws.cloudtrail#EventDataStoreMaxLimitExceededException": + response = { + ...(await deserializeAws_json1_1EventDataStoreMaxLimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": + response = { + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InsufficientDependencyServiceAccessPermissionException": case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": response = { @@ -1647,26 +3197,18 @@ const deserializeAws_json1_1PutEventSelectorsCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "InvalidEventSelectorsException": - case "com.amazonaws.cloudtrail#InvalidEventSelectorsException": - response = { - ...(await deserializeAws_json1_1InvalidEventSelectorsExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidHomeRegionException": - case "com.amazonaws.cloudtrail#InvalidHomeRegionException": + case "InvalidEventDataStoreStatusException": + case "com.amazonaws.cloudtrail#InvalidEventDataStoreStatusException": response = { - ...(await deserializeAws_json1_1InvalidHomeRegionExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidEventDataStoreStatusExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1687,10 +3229,18 @@ const deserializeAws_json1_1PutEventSelectorsCommandError = async ( $metadata: 
deserializeMetadata(output), }; break; - case "TrailNotFoundException": - case "com.amazonaws.cloudtrail#TrailNotFoundException": + case "OrganizationNotInAllFeaturesModeException": + case "com.amazonaws.cloudtrail#OrganizationNotInAllFeaturesModeException": response = { - ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OrganizationNotInAllFeaturesModeExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OrganizationsNotInUseException": + case "com.amazonaws.cloudtrail#OrganizationsNotInUseException": + response = { + ...(await deserializeAws_json1_1OrganizationsNotInUseExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1720,27 +3270,27 @@ const deserializeAws_json1_1PutEventSelectorsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1PutInsightSelectorsCommand = async ( +export const deserializeAws_json1_1StartLoggingCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1PutInsightSelectorsCommandError(output, context); + return deserializeAws_json1_1StartLoggingCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1PutInsightSelectorsResponse(data, context); - const response: PutInsightSelectorsCommandOutput = { + contents = deserializeAws_json1_1StartLoggingResponse(data, context); + const response: StartLoggingCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1PutInsightSelectorsCommandError = async ( +const deserializeAws_json1_1StartLoggingCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1749,18 +3299,13 @@ const deserializeAws_json1_1PutInsightSelectorsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "InsufficientEncryptionPolicyException": - case "com.amazonaws.cloudtrail#InsufficientEncryptionPolicyException": - response = { - ...(await deserializeAws_json1_1InsufficientEncryptionPolicyExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InsufficientS3BucketPolicyException": - case "com.amazonaws.cloudtrail#InsufficientS3BucketPolicyException": + case "InsufficientDependencyServiceAccessPermissionException": + case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": response = { - ...(await deserializeAws_json1_1InsufficientS3BucketPolicyExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InsufficientDependencyServiceAccessPermissionExceptionResponse( + parsedOutput, + context + )), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1773,14 +3318,6 @@ const deserializeAws_json1_1PutInsightSelectorsCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "InvalidInsightSelectorsException": - case "com.amazonaws.cloudtrail#InvalidInsightSelectorsException": - response = { - ...(await 
deserializeAws_json1_1InvalidInsightSelectorsExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "InvalidTrailNameException": case "com.amazonaws.cloudtrail#InvalidTrailNameException": response = { @@ -1789,14 +3326,6 @@ const deserializeAws_json1_1PutInsightSelectorsCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "KmsException": - case "com.amazonaws.cloudtrail#KmsException": - response = { - ...(await deserializeAws_json1_1KmsExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "NotOrganizationMasterAccountException": case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": response = { @@ -1813,14 +3342,6 @@ const deserializeAws_json1_1PutInsightSelectorsCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "S3BucketDoesNotExistException": - case "com.amazonaws.cloudtrail#S3BucketDoesNotExistException": - response = { - ...(await deserializeAws_json1_1S3BucketDoesNotExistExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "TrailNotFoundException": case "com.amazonaws.cloudtrail#TrailNotFoundException": response = { @@ -1854,27 +3375,27 @@ const deserializeAws_json1_1PutInsightSelectorsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1RemoveTagsCommand = async ( +export const deserializeAws_json1_1StartQueryCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1RemoveTagsCommandError(output, context); + return deserializeAws_json1_1StartQueryCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1RemoveTagsResponse(data, context); - const response: RemoveTagsCommandOutput = { + contents = deserializeAws_json1_1StartQueryResponse(data, context); + const response: StartQueryCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1RemoveTagsCommandError = async ( +const deserializeAws_json1_1StartQueryCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1883,58 +3404,58 @@ const deserializeAws_json1_1RemoveTagsCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { - case "CloudTrailARNInvalidException": - case "com.amazonaws.cloudtrail#CloudTrailARNInvalidException": + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": response = { - ...(await deserializeAws_json1_1CloudTrailARNInvalidExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTagParameterException": - case "com.amazonaws.cloudtrail#InvalidTagParameterException": + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": response = { - ...(await 
deserializeAws_json1_1InvalidTagParameterExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "NotOrganizationMasterAccountException": - case "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException": + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": response = { - ...(await deserializeAws_json1_1NotOrganizationMasterAccountExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "OperationNotPermittedException": - case "com.amazonaws.cloudtrail#OperationNotPermittedException": + case "InvalidQueryStatementException": + case "com.amazonaws.cloudtrail#InvalidQueryStatementException": response = { - ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidQueryStatementExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "ResourceNotFoundException": - case "com.amazonaws.cloudtrail#ResourceNotFoundException": + case "MaxConcurrentQueriesException": + case "com.amazonaws.cloudtrail#MaxConcurrentQueriesException": response = { - ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1MaxConcurrentQueriesExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "ResourceTypeNotSupportedException": - case "com.amazonaws.cloudtrail#ResourceTypeNotSupportedException": + case "OperationNotPermittedException": + case "com.amazonaws.cloudtrail#OperationNotPermittedException": response = { - ...(await deserializeAws_json1_1ResourceTypeNotSupportedExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OperationNotPermittedExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1964,27 +3485,27 @@ const deserializeAws_json1_1RemoveTagsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1StartLoggingCommand = async ( +export const deserializeAws_json1_1StopLoggingCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1StartLoggingCommandError(output, context); + return deserializeAws_json1_1StopLoggingCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1StartLoggingResponse(data, context); - const response: StartLoggingCommandOutput = { + contents = deserializeAws_json1_1StopLoggingResponse(data, context); + const response: StopLoggingCommandOutput = { 
$metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1StartLoggingCommandError = async ( +const deserializeAws_json1_1StopLoggingCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2069,27 +3590,27 @@ const deserializeAws_json1_1StartLoggingCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1StopLoggingCommand = async ( +export const deserializeAws_json1_1UpdateEventDataStoreCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1StopLoggingCommandError(output, context); + return deserializeAws_json1_1UpdateEventDataStoreCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1StopLoggingResponse(data, context); - const response: StopLoggingCommandOutput = { + contents = deserializeAws_json1_1UpdateEventDataStoreResponse(data, context); + const response: UpdateEventDataStoreCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1StopLoggingCommandError = async ( +const deserializeAws_json1_1UpdateEventDataStoreCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2098,6 +3619,38 @@ const deserializeAws_json1_1StopLoggingCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "CloudTrailAccessNotEnabledException": + case "com.amazonaws.cloudtrail#CloudTrailAccessNotEnabledException": + response = { + ...(await deserializeAws_json1_1CloudTrailAccessNotEnabledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreARNInvalidException": + case "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": + response = { + ...(await deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EventDataStoreNotFoundException": + case "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": + response = { + ...(await deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InactiveEventDataStoreException": + case "com.amazonaws.cloudtrail#InactiveEventDataStoreException": + response = { + ...(await deserializeAws_json1_1InactiveEventDataStoreExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InsufficientDependencyServiceAccessPermissionException": case "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException": response = { @@ -2109,18 +3662,10 @@ const deserializeAws_json1_1StopLoggingCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "InvalidHomeRegionException": - case "com.amazonaws.cloudtrail#InvalidHomeRegionException": - response = { - ...(await 
deserializeAws_json1_1InvalidHomeRegionExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidTrailNameException": - case "com.amazonaws.cloudtrail#InvalidTrailNameException": + case "InvalidParameterException": + case "com.amazonaws.cloudtrail#InvalidParameterException": response = { - ...(await deserializeAws_json1_1InvalidTrailNameExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2141,10 +3686,18 @@ const deserializeAws_json1_1StopLoggingCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "TrailNotFoundException": - case "com.amazonaws.cloudtrail#TrailNotFoundException": + case "OrganizationNotInAllFeaturesModeException": + case "com.amazonaws.cloudtrail#OrganizationNotInAllFeaturesModeException": response = { - ...(await deserializeAws_json1_1TrailNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1OrganizationNotInAllFeaturesModeExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OrganizationsNotInUseException": + case "com.amazonaws.cloudtrail#OrganizationsNotInUseException": + response = { + ...(await deserializeAws_json1_1OrganizationsNotInUseExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2522,6 +4075,111 @@ const deserializeAws_json1_1ConflictExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1EventDataStoreAlreadyExistsExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<EventDataStoreAlreadyExistsException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1EventDataStoreAlreadyExistsException(body, context); + const contents: EventDataStoreAlreadyExistsException = { + name: "EventDataStoreAlreadyExistsException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1EventDataStoreARNInvalidExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<EventDataStoreARNInvalidException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1EventDataStoreARNInvalidException(body, context); + const contents: EventDataStoreARNInvalidException = { + name: "EventDataStoreARNInvalidException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1EventDataStoreMaxLimitExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<EventDataStoreMaxLimitExceededException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1EventDataStoreMaxLimitExceededException(body, context); + const contents: EventDataStoreMaxLimitExceededException = { + name: "EventDataStoreMaxLimitExceededException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1EventDataStoreNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<EventDataStoreNotFoundException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1EventDataStoreNotFoundException(body, context); + const contents: EventDataStoreNotFoundException = { + name: "EventDataStoreNotFoundException", + $fault:
"client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1EventDataStoreTerminationProtectedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<EventDataStoreTerminationProtectedException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1EventDataStoreTerminationProtectedException(body, context); + const contents: EventDataStoreTerminationProtectedException = { + name: "EventDataStoreTerminationProtectedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1InactiveEventDataStoreExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<InactiveEventDataStoreException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InactiveEventDataStoreException(body, context); + const contents: InactiveEventDataStoreException = { + name: "InactiveEventDataStoreException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1InactiveQueryExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<InactiveQueryException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InactiveQueryException(body, context); + const contents: InactiveQueryException = { + name: "InactiveQueryException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1InsightNotEnabledExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -2627,6 +4285,21 @@ const deserializeAws_json1_1InvalidCloudWatchLogsRoleArnExceptionResponse = asyn return contents; }; +const deserializeAws_json1_1InvalidDateRangeExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<InvalidDateRangeException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InvalidDateRangeException(body, context); + const contents: InvalidDateRangeException = { + name: "InvalidDateRangeException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1InvalidEventCategoryExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -2642,6 +4315,21 @@ const deserializeAws_json1_1InvalidEventCategoryExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1InvalidEventDataStoreStatusExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<InvalidEventDataStoreStatusException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InvalidEventDataStoreStatusException(body, context); + const contents: InvalidEventDataStoreStatusException = { + name: "InvalidEventDataStoreStatusException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1InvalidEventSelectorsExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -2762,6 +4450,51 @@ const deserializeAws_json1_1InvalidParameterCombinationExceptionResponse = async return contents; }; +const deserializeAws_json1_1InvalidParameterExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<InvalidParameterException> => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InvalidParameterException(body, context); + const
contents: InvalidParameterException = { + name: "InvalidParameterException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1InvalidQueryStatementExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InvalidQueryStatementException(body, context); + const contents: InvalidQueryStatementException = { + name: "InvalidQueryStatementException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1InvalidQueryStatusExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InvalidQueryStatusException(body, context); + const contents: InvalidQueryStatusException = { + name: "InvalidQueryStatusException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1InvalidS3BucketNameExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -2912,6 +4645,21 @@ const deserializeAws_json1_1KmsKeyNotFoundExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1MaxConcurrentQueriesExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1MaxConcurrentQueriesException(body, context); + const contents: MaxConcurrentQueriesException = { + name: "MaxConcurrentQueriesException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1MaximumNumberOfTrailsExceededExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -2987,6 +4735,21 @@ const deserializeAws_json1_1OrganizationsNotInUseExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1QueryIdNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1QueryIdNotFoundException(body, context); + const contents: QueryIdNotFoundException = { + name: "QueryIdNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1ResourceNotFoundExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -3165,6 +4928,39 @@ const serializeAws_json1_1AdvancedFieldSelectors = (input: AdvancedFieldSelector }); }; +const serializeAws_json1_1CancelQueryRequest = (input: CancelQueryRequest, context: __SerdeContext): any => { + return { + ...(input.EventDataStore !== undefined && + input.EventDataStore !== null && { EventDataStore: input.EventDataStore }), + ...(input.QueryId !== undefined && input.QueryId !== null && { QueryId: input.QueryId }), + }; +}; + +const serializeAws_json1_1CreateEventDataStoreRequest = ( + input: CreateEventDataStoreRequest, + context: __SerdeContext +): any => { + return { + ...(input.AdvancedEventSelectors !== undefined && + input.AdvancedEventSelectors !== null && { + AdvancedEventSelectors: serializeAws_json1_1AdvancedEventSelectors(input.AdvancedEventSelectors, context), + }), + ...(input.MultiRegionEnabled !== undefined && + 
input.MultiRegionEnabled !== null && { MultiRegionEnabled: input.MultiRegionEnabled }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.OrganizationEnabled !== undefined && + input.OrganizationEnabled !== null && { OrganizationEnabled: input.OrganizationEnabled }), + ...(input.RetentionPeriod !== undefined && + input.RetentionPeriod !== null && { RetentionPeriod: input.RetentionPeriod }), + ...(input.TagsList !== undefined && + input.TagsList !== null && { TagsList: serializeAws_json1_1TagsList(input.TagsList, context) }), + ...(input.TerminationProtectionEnabled !== undefined && + input.TerminationProtectionEnabled !== null && { + TerminationProtectionEnabled: input.TerminationProtectionEnabled, + }), + }; +}; + const serializeAws_json1_1CreateTrailRequest = (input: CreateTrailRequest, context: __SerdeContext): any => { return { ...(input.CloudWatchLogsLogGroupArn !== undefined && @@ -3219,12 +5015,30 @@ const serializeAws_json1_1DataResourceValues = (input: string[], context: __Serd }); }; +const serializeAws_json1_1DeleteEventDataStoreRequest = ( + input: DeleteEventDataStoreRequest, + context: __SerdeContext +): any => { + return { + ...(input.EventDataStore !== undefined && + input.EventDataStore !== null && { EventDataStore: input.EventDataStore }), + }; +}; + const serializeAws_json1_1DeleteTrailRequest = (input: DeleteTrailRequest, context: __SerdeContext): any => { return { ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), }; }; +const serializeAws_json1_1DescribeQueryRequest = (input: DescribeQueryRequest, context: __SerdeContext): any => { + return { + ...(input.EventDataStore !== undefined && + input.EventDataStore !== null && { EventDataStore: input.EventDataStore }), + ...(input.QueryId !== undefined && input.QueryId !== null && { QueryId: input.QueryId }), + }; +}; + const serializeAws_json1_1DescribeTrailsRequest = (input: DescribeTrailsRequest, context: __SerdeContext): any => { return { ...(input.includeShadowTrails !== undefined && @@ -3277,6 +5091,16 @@ const serializeAws_json1_1ExcludeManagementEventSources = (input: string[], cont }); }; +const serializeAws_json1_1GetEventDataStoreRequest = ( + input: GetEventDataStoreRequest, + context: __SerdeContext +): any => { + return { + ...(input.EventDataStore !== undefined && + input.EventDataStore !== null && { EventDataStore: input.EventDataStore }), + }; +}; + const serializeAws_json1_1GetEventSelectorsRequest = ( input: GetEventSelectorsRequest, context: __SerdeContext @@ -3295,6 +5119,17 @@ const serializeAws_json1_1GetInsightSelectorsRequest = ( }; }; +const serializeAws_json1_1GetQueryResultsRequest = (input: GetQueryResultsRequest, context: __SerdeContext): any => { + return { + ...(input.EventDataStore !== undefined && + input.EventDataStore !== null && { EventDataStore: input.EventDataStore }), + ...(input.MaxQueryResults !== undefined && + input.MaxQueryResults !== null && { MaxQueryResults: input.MaxQueryResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.QueryId !== undefined && input.QueryId !== null && { QueryId: input.QueryId }), + }; +}; + const serializeAws_json1_1GetTrailRequest = (input: GetTrailRequest, context: __SerdeContext): any => { return { ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), @@ -3324,6 +5159,16 @@ const serializeAws_json1_1InsightSelectors = (input: InsightSelector[], context: }); }; +const 
serializeAws_json1_1ListEventDataStoresRequest = ( + input: ListEventDataStoresRequest, + context: __SerdeContext +): any => { + return { + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }; +}; + const serializeAws_json1_1ListPublicKeysRequest = (input: ListPublicKeysRequest, context: __SerdeContext): any => { return { ...(input.EndTime !== undefined && @@ -3334,6 +5179,20 @@ const serializeAws_json1_1ListPublicKeysRequest = (input: ListPublicKeysRequest, }; }; +const serializeAws_json1_1ListQueriesRequest = (input: ListQueriesRequest, context: __SerdeContext): any => { + return { + ...(input.EndTime !== undefined && + input.EndTime !== null && { EndTime: Math.round(input.EndTime.getTime() / 1000) }), + ...(input.EventDataStore !== undefined && + input.EventDataStore !== null && { EventDataStore: input.EventDataStore }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.QueryStatus !== undefined && input.QueryStatus !== null && { QueryStatus: input.QueryStatus }), + ...(input.StartTime !== undefined && + input.StartTime !== null && { StartTime: Math.round(input.StartTime.getTime() / 1000) }), + }; +}; + const serializeAws_json1_1ListTagsRequest = (input: ListTagsRequest, context: __SerdeContext): any => { return { ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), @@ -3445,12 +5304,29 @@ const serializeAws_json1_1ResourceIdList = (input: string[], context: __SerdeCon }); }; +const serializeAws_json1_1RestoreEventDataStoreRequest = ( + input: RestoreEventDataStoreRequest, + context: __SerdeContext +): any => { + return { + ...(input.EventDataStore !== undefined && + input.EventDataStore !== null && { EventDataStore: input.EventDataStore }), + }; +}; + const serializeAws_json1_1StartLoggingRequest = (input: StartLoggingRequest, context: __SerdeContext): any => { return { ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), }; }; +const serializeAws_json1_1StartQueryRequest = (input: StartQueryRequest, context: __SerdeContext): any => { + return { + ...(input.QueryStatement !== undefined && + input.QueryStatement !== null && { QueryStatement: input.QueryStatement }), + }; +}; + const serializeAws_json1_1StopLoggingRequest = (input: StopLoggingRequest, context: __SerdeContext): any => { return { ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), @@ -3486,6 +5362,31 @@ const serializeAws_json1_1TrailNameList = (input: string[], context: __SerdeCont }); }; +const serializeAws_json1_1UpdateEventDataStoreRequest = ( + input: UpdateEventDataStoreRequest, + context: __SerdeContext +): any => { + return { + ...(input.AdvancedEventSelectors !== undefined && + input.AdvancedEventSelectors !== null && { + AdvancedEventSelectors: serializeAws_json1_1AdvancedEventSelectors(input.AdvancedEventSelectors, context), + }), + ...(input.EventDataStore !== undefined && + input.EventDataStore !== null && { EventDataStore: input.EventDataStore }), + ...(input.MultiRegionEnabled !== undefined && + input.MultiRegionEnabled !== null && { MultiRegionEnabled: input.MultiRegionEnabled }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.OrganizationEnabled !== 
undefined && + input.OrganizationEnabled !== null && { OrganizationEnabled: input.OrganizationEnabled }), + ...(input.RetentionPeriod !== undefined && + input.RetentionPeriod !== null && { RetentionPeriod: input.RetentionPeriod }), + ...(input.TerminationProtectionEnabled !== undefined && + input.TerminationProtectionEnabled !== null && { + TerminationProtectionEnabled: input.TerminationProtectionEnabled, + }), + }; +}; + const serializeAws_json1_1UpdateTrailRequest = (input: UpdateTrailRequest, context: __SerdeContext): any => { return { ...(input.CloudWatchLogsLogGroupArn !== undefined && @@ -3580,6 +5481,13 @@ const deserializeAws_json1_1AdvancedFieldSelectors = ( }); }; +const deserializeAws_json1_1CancelQueryResponse = (output: any, context: __SerdeContext): CancelQueryResponse => { + return { + QueryId: __expectString(output.QueryId), + QueryStatus: __expectString(output.QueryStatus), + } as any; +}; + const deserializeAws_json1_1CloudTrailAccessNotEnabledException = ( output: any, context: __SerdeContext @@ -3622,6 +5530,37 @@ const deserializeAws_json1_1ConflictException = (output: any, context: __SerdeCo } as any; }; +const deserializeAws_json1_1CreateEventDataStoreResponse = ( + output: any, + context: __SerdeContext +): CreateEventDataStoreResponse => { + return { + AdvancedEventSelectors: + output.AdvancedEventSelectors !== undefined && output.AdvancedEventSelectors !== null + ? deserializeAws_json1_1AdvancedEventSelectors(output.AdvancedEventSelectors, context) + : undefined, + CreatedTimestamp: + output.CreatedTimestamp !== undefined && output.CreatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreatedTimestamp))) + : undefined, + EventDataStoreArn: __expectString(output.EventDataStoreArn), + MultiRegionEnabled: __expectBoolean(output.MultiRegionEnabled), + Name: __expectString(output.Name), + OrganizationEnabled: __expectBoolean(output.OrganizationEnabled), + RetentionPeriod: __expectInt32(output.RetentionPeriod), + Status: __expectString(output.Status), + TagsList: + output.TagsList !== undefined && output.TagsList !== null + ? deserializeAws_json1_1TagsList(output.TagsList, context) + : undefined, + TerminationProtectionEnabled: __expectBoolean(output.TerminationProtectionEnabled), + UpdatedTimestamp: + output.UpdatedTimestamp !== undefined && output.UpdatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.UpdatedTimestamp))) + : undefined, + } as any; +}; + const deserializeAws_json1_1CreateTrailResponse = (output: any, context: __SerdeContext): CreateTrailResponse => { return { CloudWatchLogsLogGroupArn: __expectString(output.CloudWatchLogsLogGroupArn), @@ -3672,10 +5611,30 @@ const deserializeAws_json1_1DataResourceValues = (output: any, context: __SerdeC }); }; +const deserializeAws_json1_1DeleteEventDataStoreResponse = ( + output: any, + context: __SerdeContext +): DeleteEventDataStoreResponse => { + return {} as any; +}; + const deserializeAws_json1_1DeleteTrailResponse = (output: any, context: __SerdeContext): DeleteTrailResponse => { return {} as any; }; +const deserializeAws_json1_1DescribeQueryResponse = (output: any, context: __SerdeContext): DescribeQueryResponse => { + return { + ErrorMessage: __expectString(output.ErrorMessage), + QueryId: __expectString(output.QueryId), + QueryStatistics: + output.QueryStatistics !== undefined && output.QueryStatistics !== null + ? 
deserializeAws_json1_1QueryStatisticsForDescribeQuery(output.QueryStatistics, context) + : undefined, + QueryStatus: __expectString(output.QueryStatus), + QueryString: __expectString(output.QueryString), + } as any; +}; + const deserializeAws_json1_1DescribeTrailsResponse = (output: any, context: __SerdeContext): DescribeTrailsResponse => { return { trailList: @@ -3705,6 +5664,86 @@ const deserializeAws_json1_1Event = (output: any, context: __SerdeContext): Even } as any; }; +const deserializeAws_json1_1EventDataStore = (output: any, context: __SerdeContext): EventDataStore => { + return { + AdvancedEventSelectors: + output.AdvancedEventSelectors !== undefined && output.AdvancedEventSelectors !== null + ? deserializeAws_json1_1AdvancedEventSelectors(output.AdvancedEventSelectors, context) + : undefined, + CreatedTimestamp: + output.CreatedTimestamp !== undefined && output.CreatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreatedTimestamp))) + : undefined, + EventDataStoreArn: __expectString(output.EventDataStoreArn), + MultiRegionEnabled: __expectBoolean(output.MultiRegionEnabled), + Name: __expectString(output.Name), + OrganizationEnabled: __expectBoolean(output.OrganizationEnabled), + RetentionPeriod: __expectInt32(output.RetentionPeriod), + Status: __expectString(output.Status), + TerminationProtectionEnabled: __expectBoolean(output.TerminationProtectionEnabled), + UpdatedTimestamp: + output.UpdatedTimestamp !== undefined && output.UpdatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.UpdatedTimestamp))) + : undefined, + } as any; +}; + +const deserializeAws_json1_1EventDataStoreAlreadyExistsException = ( + output: any, + context: __SerdeContext +): EventDataStoreAlreadyExistsException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1EventDataStoreARNInvalidException = ( + output: any, + context: __SerdeContext +): EventDataStoreARNInvalidException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1EventDataStoreMaxLimitExceededException = ( + output: any, + context: __SerdeContext +): EventDataStoreMaxLimitExceededException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1EventDataStoreNotFoundException = ( + output: any, + context: __SerdeContext +): EventDataStoreNotFoundException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1EventDataStores = (output: any, context: __SerdeContext): EventDataStore[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1EventDataStore(entry, context); + }); +}; + +const deserializeAws_json1_1EventDataStoreTerminationProtectedException = ( + output: any, + context: __SerdeContext +): EventDataStoreTerminationProtectedException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1EventSelector = (output: any, context: __SerdeContext): EventSelector => { return { DataResources: @@ -3753,6 +5792,33 @@ const deserializeAws_json1_1ExcludeManagementEventSources = (output: any, contex }); }; +const deserializeAws_json1_1GetEventDataStoreResponse = ( + output: any, + context: __SerdeContext +): GetEventDataStoreResponse => { + return { + AdvancedEventSelectors: + output.AdvancedEventSelectors !== 
undefined && output.AdvancedEventSelectors !== null + ? deserializeAws_json1_1AdvancedEventSelectors(output.AdvancedEventSelectors, context) + : undefined, + CreatedTimestamp: + output.CreatedTimestamp !== undefined && output.CreatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreatedTimestamp))) + : undefined, + EventDataStoreArn: __expectString(output.EventDataStoreArn), + MultiRegionEnabled: __expectBoolean(output.MultiRegionEnabled), + Name: __expectString(output.Name), + OrganizationEnabled: __expectBoolean(output.OrganizationEnabled), + RetentionPeriod: __expectInt32(output.RetentionPeriod), + Status: __expectString(output.Status), + TerminationProtectionEnabled: __expectBoolean(output.TerminationProtectionEnabled), + UpdatedTimestamp: + output.UpdatedTimestamp !== undefined && output.UpdatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.UpdatedTimestamp))) + : undefined, + } as any; +}; + const deserializeAws_json1_1GetEventSelectorsResponse = ( output: any, context: __SerdeContext @@ -3783,6 +5849,25 @@ const deserializeAws_json1_1GetInsightSelectorsResponse = ( } as any; }; +const deserializeAws_json1_1GetQueryResultsResponse = ( + output: any, + context: __SerdeContext +): GetQueryResultsResponse => { + return { + ErrorMessage: __expectString(output.ErrorMessage), + NextToken: __expectString(output.NextToken), + QueryResultRows: + output.QueryResultRows !== undefined && output.QueryResultRows !== null + ? deserializeAws_json1_1QueryResultRows(output.QueryResultRows, context) + : undefined, + QueryStatistics: + output.QueryStatistics !== undefined && output.QueryStatistics !== null + ? deserializeAws_json1_1QueryStatistics(output.QueryStatistics, context) + : undefined, + QueryStatus: __expectString(output.QueryStatus), + } as any; +}; + const deserializeAws_json1_1GetTrailResponse = (output: any, context: __SerdeContext): GetTrailResponse => { return { Trail: @@ -3832,6 +5917,21 @@ const deserializeAws_json1_1GetTrailStatusResponse = (output: any, context: __Se } as any; }; +const deserializeAws_json1_1InactiveEventDataStoreException = ( + output: any, + context: __SerdeContext +): InactiveEventDataStoreException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1InactiveQueryException = (output: any, context: __SerdeContext): InactiveQueryException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1InsightNotEnabledException = ( output: any, context: __SerdeContext @@ -3912,6 +6012,15 @@ const deserializeAws_json1_1InvalidCloudWatchLogsRoleArnException = ( } as any; }; +const deserializeAws_json1_1InvalidDateRangeException = ( + output: any, + context: __SerdeContext +): InvalidDateRangeException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1InvalidEventCategoryException = ( output: any, context: __SerdeContext @@ -3921,6 +6030,15 @@ const deserializeAws_json1_1InvalidEventCategoryException = ( } as any; }; +const deserializeAws_json1_1InvalidEventDataStoreStatusException = ( + output: any, + context: __SerdeContext +): InvalidEventDataStoreStatusException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1InvalidEventSelectorsException = ( output: any, context: __SerdeContext @@ -3993,6 +6111,33 @@ const deserializeAws_json1_1InvalidParameterCombinationException = ( } as any; }; +const 
deserializeAws_json1_1InvalidParameterException = ( + output: any, + context: __SerdeContext +): InvalidParameterException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1InvalidQueryStatementException = ( + output: any, + context: __SerdeContext +): InvalidQueryStatementException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1InvalidQueryStatusException = ( + output: any, + context: __SerdeContext +): InvalidQueryStatusException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1InvalidS3BucketNameException = ( output: any, context: __SerdeContext @@ -4077,6 +6222,19 @@ const deserializeAws_json1_1KmsKeyNotFoundException = ( } as any; }; +const deserializeAws_json1_1ListEventDataStoresResponse = ( + output: any, + context: __SerdeContext +): ListEventDataStoresResponse => { + return { + EventDataStores: + output.EventDataStores !== undefined && output.EventDataStores !== null + ? deserializeAws_json1_1EventDataStores(output.EventDataStores, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + const deserializeAws_json1_1ListPublicKeysResponse = (output: any, context: __SerdeContext): ListPublicKeysResponse => { return { NextToken: __expectString(output.NextToken), @@ -4087,6 +6245,16 @@ const deserializeAws_json1_1ListPublicKeysResponse = (output: any, context: __Se } as any; }; +const deserializeAws_json1_1ListQueriesResponse = (output: any, context: __SerdeContext): ListQueriesResponse => { + return { + NextToken: __expectString(output.NextToken), + Queries: + output.Queries !== undefined && output.Queries !== null + ? deserializeAws_json1_1Queries(output.Queries, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1ListTagsResponse = (output: any, context: __SerdeContext): ListTagsResponse => { return { NextToken: __expectString(output.NextToken), @@ -4117,6 +6285,15 @@ const deserializeAws_json1_1LookupEventsResponse = (output: any, context: __Serd } as any; }; +const deserializeAws_json1_1MaxConcurrentQueriesException = ( + output: any, + context: __SerdeContext +): MaxConcurrentQueriesException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1MaximumNumberOfTrailsExceededException = ( output: any, context: __SerdeContext @@ -4229,6 +6406,93 @@ const deserializeAws_json1_1PutInsightSelectorsResponse = ( } as any; }; +const deserializeAws_json1_1Queries = (output: any, context: __SerdeContext): Query[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1Query(entry, context); + }); +}; + +const deserializeAws_json1_1Query = (output: any, context: __SerdeContext): Query => { + return { + CreationTime: + output.CreationTime !== undefined && output.CreationTime !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) + : undefined, + QueryId: __expectString(output.QueryId), + QueryStatus: __expectString(output.QueryStatus), + } as any; +}; + +const deserializeAws_json1_1QueryIdNotFoundException = ( + output: any, + context: __SerdeContext +): QueryIdNotFoundException => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1QueryResultColumn = (output: any, context: __SerdeContext): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_json1_1QueryResultRow = (output: any, context: __SerdeContext): { [key: string]: string }[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1QueryResultColumn(entry, context); + }); +}; + +const deserializeAws_json1_1QueryResultRows = (output: any, context: __SerdeContext): { [key: string]: string }[][] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1QueryResultRow(entry, context); + }); +}; + +const deserializeAws_json1_1QueryStatistics = (output: any, context: __SerdeContext): QueryStatistics => { + return { + ResultsCount: __expectInt32(output.ResultsCount), + TotalResultsCount: __expectInt32(output.TotalResultsCount), + } as any; +}; + +const deserializeAws_json1_1QueryStatisticsForDescribeQuery = ( + output: any, + context: __SerdeContext +): QueryStatisticsForDescribeQuery => { + return { + CreationTime: + output.CreationTime !== undefined && output.CreationTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) + : undefined, + EventsMatched: __expectLong(output.EventsMatched), + EventsScanned: __expectLong(output.EventsScanned), + ExecutionTimeInMillis: __expectInt32(output.ExecutionTimeInMillis), + } as any; +}; + const deserializeAws_json1_1RemoveTagsResponse = (output: any, context: __SerdeContext): RemoveTagsResponse => { return {} as any; }; @@ -4290,6 +6554,33 @@ const deserializeAws_json1_1ResourceTypeNotSupportedException = ( } as any; }; +const deserializeAws_json1_1RestoreEventDataStoreResponse = ( + output: any, + context: __SerdeContext +): RestoreEventDataStoreResponse => { + return { + AdvancedEventSelectors: + output.AdvancedEventSelectors !== undefined && output.AdvancedEventSelectors !== null + ? deserializeAws_json1_1AdvancedEventSelectors(output.AdvancedEventSelectors, context) + : undefined, + CreatedTimestamp: + output.CreatedTimestamp !== undefined && output.CreatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreatedTimestamp))) + : undefined, + EventDataStoreArn: __expectString(output.EventDataStoreArn), + MultiRegionEnabled: __expectBoolean(output.MultiRegionEnabled), + Name: __expectString(output.Name), + OrganizationEnabled: __expectBoolean(output.OrganizationEnabled), + RetentionPeriod: __expectInt32(output.RetentionPeriod), + Status: __expectString(output.Status), + TerminationProtectionEnabled: __expectBoolean(output.TerminationProtectionEnabled), + UpdatedTimestamp: + output.UpdatedTimestamp !== undefined && output.UpdatedTimestamp !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.UpdatedTimestamp))) + : undefined, + } as any; +}; + const deserializeAws_json1_1S3BucketDoesNotExistException = ( output: any, context: __SerdeContext @@ -4303,6 +6594,12 @@ const deserializeAws_json1_1StartLoggingResponse = (output: any, context: __Serd return {} as any; }; +const deserializeAws_json1_1StartQueryResponse = (output: any, context: __SerdeContext): StartQueryResponse => { + return { + QueryId: __expectString(output.QueryId), + } as any; +}; + const deserializeAws_json1_1StopLoggingResponse = (output: any, context: __SerdeContext): StopLoggingResponse => { return {} as any; }; @@ -4418,6 +6715,33 @@ const deserializeAws_json1_1UnsupportedOperationException = ( } as any; }; +const deserializeAws_json1_1UpdateEventDataStoreResponse = ( + output: any, + context: __SerdeContext +): UpdateEventDataStoreResponse => { + return { + AdvancedEventSelectors: + output.AdvancedEventSelectors !== undefined && output.AdvancedEventSelectors !== null + ? deserializeAws_json1_1AdvancedEventSelectors(output.AdvancedEventSelectors, context) + : undefined, + CreatedTimestamp: + output.CreatedTimestamp !== undefined && output.CreatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreatedTimestamp))) + : undefined, + EventDataStoreArn: __expectString(output.EventDataStoreArn), + MultiRegionEnabled: __expectBoolean(output.MultiRegionEnabled), + Name: __expectString(output.Name), + OrganizationEnabled: __expectBoolean(output.OrganizationEnabled), + RetentionPeriod: __expectInt32(output.RetentionPeriod), + Status: __expectString(output.Status), + TerminationProtectionEnabled: __expectBoolean(output.TerminationProtectionEnabled), + UpdatedTimestamp: + output.UpdatedTimestamp !== undefined && output.UpdatedTimestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.UpdatedTimestamp))) + : undefined, + } as any; +}; + const deserializeAws_json1_1UpdateTrailResponse = (output: any, context: __SerdeContext): UpdateTrailResponse => { return { CloudWatchLogsLogGroupArn: __expectString(output.CloudWatchLogsLogGroupArn), diff --git a/clients/client-detective/README.md b/clients/client-detective/README.md index 894184855b88..c132c9042e94 100644 --- a/clients/client-detective/README.md +++ b/clients/client-detective/README.md @@ -7,20 +7,32 @@ AWS SDK for JavaScript Detective Client for Node.js, Browser and React Native. -

Detective uses machine learning and purpose-built visualizations to help you analyze and -investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically -extracts time-based events such as login attempts, API calls, and network traffic from -AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by +

Detective uses machine learning and purpose-built visualizations to help you to +analyze and investigate security issues across your Amazon Web Services workloads. Detective automatically extracts time-based events such +as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by Amazon GuardDuty.

-

The Detective API primarily supports the creation and management of behavior graphs. A -behavior graph contains the extracted data from a set of member accounts, and is created -and managed by an administrator account.

-

Every behavior graph is specific to a Region. You can only use the API to manage graphs -that belong to the Region that is associated with the currently selected endpoint.

-

A Detective administrator account can use the Detective API to do the following:

+

The Detective API primarily supports the creation and management of behavior +graphs. A behavior graph contains the extracted data from a set of member accounts, and is +created and managed by an administrator account.

+

To add a member account to the behavior graph, the administrator account sends an +invitation to the account. When the account accepts the invitation, it becomes a member +account in the behavior graph.

+

Detective is also integrated with Organizations. The organization +management account designates the Detective administrator account for the +organization. That account becomes the administrator account for the organization behavior +graph. The Detective administrator account can enable any organization account as +a member account in the organization behavior graph. The organization accounts do not +receive invitations. The Detective administrator account can also invite other +accounts to the organization behavior graph.

+

Every behavior graph is specific to a Region. You can only use the API to manage +behavior graphs that belong to the Region that is associated with the currently selected +endpoint.

+

The administrator account for a behavior graph can use the Detective API to do +the following:

  • -

    Enable and disable Detective. Enabling Detective creates a new behavior graph.

    +

    Enable and disable Detective. Enabling Detective creates a new +behavior graph.

  • View the list of member accounts in a behavior graph.

    @@ -31,8 +43,23 @@ that belong to the Region that is associated with the currently selected endpoin
  • Remove member accounts from a behavior graph.

  • +
  • +

    Apply tags to a behavior graph.

    +
  • +
+

The organization management account can use the Detective API to select the +delegated administrator for Detective.

+

The Detective administrator account for an organization can use the Detective API to do the following:

+
    +
  • +

    Perform all of the functions of an administrator account.

    +
  • +
  • +

    Determine whether to automatically enable new organization accounts as member +accounts in the organization behavior graph.

    +
-

A member account can use the Detective API to do the following:

+

An invited member account can use the Detective API to do the following:

  • View the list of behavior graphs that they are invited to.

    @@ -51,7 +78,8 @@ that belong to the Region that is associated with the currently selected endpoin

    We replaced the term "master account" with the term "administrator account." An administrator account is used to centrally manage multiple accounts. In the case of -Detective, the administrator account manages the accounts in their behavior graph.

    +Detective, the administrator account manages the accounts in their behavior +graph.

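A minimal usage sketch of the new organization APIs described above, written against the client and command classes imported in this change; the `AccountId` request field and the shape of the list response are assumptions made for illustration only, not a definitive reference.

```ts
import {
  DetectiveClient,
  EnableOrganizationAdminAccountCommand,
  ListOrganizationAdminAccountsCommand,
} from "@aws-sdk/client-detective";

// Both calls below can only be made with organization management account credentials.
const client = new DetectiveClient({ region: "us-east-1" });

async function designateDetectiveAdministrator(): Promise<void> {
  // Designate the Detective administrator account for the organization in this
  // Region. "AccountId" is an assumed field name for the target account ID.
  await client.send(new EnableOrganizationAdminAccountCommand({ AccountId: "111122223333" }));

  // Confirm the designation; the response shape is not shown in this diff,
  // so the result is logged as-is.
  const admins = await client.send(new ListOrganizationAdminAccountsCommand({}));
  console.log(admins);
}
```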
    ## Installing diff --git a/clients/client-detective/src/Detective.ts b/clients/client-detective/src/Detective.ts index 4b1c8edea983..ab49cc33d6b8 100644 --- a/clients/client-detective/src/Detective.ts +++ b/clients/client-detective/src/Detective.ts @@ -17,11 +17,26 @@ import { DeleteMembersCommandInput, DeleteMembersCommandOutput, } from "./commands/DeleteMembersCommand"; +import { + DescribeOrganizationConfigurationCommand, + DescribeOrganizationConfigurationCommandInput, + DescribeOrganizationConfigurationCommandOutput, +} from "./commands/DescribeOrganizationConfigurationCommand"; +import { + DisableOrganizationAdminAccountCommand, + DisableOrganizationAdminAccountCommandInput, + DisableOrganizationAdminAccountCommandOutput, +} from "./commands/DisableOrganizationAdminAccountCommand"; import { DisassociateMembershipCommand, DisassociateMembershipCommandInput, DisassociateMembershipCommandOutput, } from "./commands/DisassociateMembershipCommand"; +import { + EnableOrganizationAdminAccountCommand, + EnableOrganizationAdminAccountCommandInput, + EnableOrganizationAdminAccountCommandOutput, +} from "./commands/EnableOrganizationAdminAccountCommand"; import { GetMembersCommand, GetMembersCommandInput, GetMembersCommandOutput } from "./commands/GetMembersCommand"; import { ListGraphsCommand, ListGraphsCommandInput, ListGraphsCommandOutput } from "./commands/ListGraphsCommand"; import { @@ -30,6 +45,11 @@ import { ListInvitationsCommandOutput, } from "./commands/ListInvitationsCommand"; import { ListMembersCommand, ListMembersCommandInput, ListMembersCommandOutput } from "./commands/ListMembersCommand"; +import { + ListOrganizationAdminAccountsCommand, + ListOrganizationAdminAccountsCommandInput, + ListOrganizationAdminAccountsCommandOutput, +} from "./commands/ListOrganizationAdminAccountsCommand"; import { ListTagsForResourceCommand, ListTagsForResourceCommandInput, @@ -51,23 +71,40 @@ import { UntagResourceCommandInput, UntagResourceCommandOutput, } from "./commands/UntagResourceCommand"; +import { + UpdateOrganizationConfigurationCommand, + UpdateOrganizationConfigurationCommandInput, + UpdateOrganizationConfigurationCommandOutput, +} from "./commands/UpdateOrganizationConfigurationCommand"; import { DetectiveClient } from "./DetectiveClient"; /** - *

    Detective uses machine learning and purpose-built visualizations to help you analyze and - * investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically - * extracts time-based events such as login attempts, API calls, and network traffic from - * AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by - * Amazon GuardDuty.

    - *

    The Detective API primarily supports the creation and management of behavior graphs. A - * behavior graph contains the extracted data from a set of member accounts, and is created - * and managed by an administrator account.

    - *

    Every behavior graph is specific to a Region. You can only use the API to manage graphs - * that belong to the Region that is associated with the currently selected endpoint.

    - *

    A Detective administrator account can use the Detective API to do the following:

    + *

Detective uses machine learning and purpose-built visualizations to help you to + * analyze and investigate security issues across your Amazon Web Services workloads. Detective automatically extracts time-based events such + * as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by + * Amazon GuardDuty.

    + *

    The Detective API primarily supports the creation and management of behavior + * graphs. A behavior graph contains the extracted data from a set of member accounts, and is + * created and managed by an administrator account.

    + *

    To add a member account to the behavior graph, the administrator account sends an + * invitation to the account. When the account accepts the invitation, it becomes a member + * account in the behavior graph.

    + *

    Detective is also integrated with Organizations. The organization + * management account designates the Detective administrator account for the + * organization. That account becomes the administrator account for the organization behavior + * graph. The Detective administrator account can enable any organization account as + * a member account in the organization behavior graph. The organization accounts do not + * receive invitations. The Detective administrator account can also invite other + * accounts to the organization behavior graph.

    + *

    Every behavior graph is specific to a Region. You can only use the API to manage + * behavior graphs that belong to the Region that is associated with the currently selected + * endpoint.

    + *

    The administrator account for a behavior graph can use the Detective API to do + * the following:

    *
      *
    • - *

      Enable and disable Detective. Enabling Detective creates a new behavior graph.

      + *

      Enable and disable Detective. Enabling Detective creates a new + * behavior graph.

      *
    • *
    • *

      View the list of member accounts in a behavior graph.

      @@ -78,8 +115,23 @@ import { DetectiveClient } from "./DetectiveClient"; *
    • *

      Remove member accounts from a behavior graph.

      *
    • + *
    • + *

      Apply tags to a behavior graph.

      + *
    • + *
    + *

    The organization management account can use the Detective API to select the + * delegated administrator for Detective.

    + *

    The Detective administrator account for an organization can use the Detective API to do the following:

    + *
      + *
    • + *

      Perform all of the functions of an administrator account.

      + *
    • + *
    • + *

      Determine whether to automatically enable new organization accounts as member + * accounts in the organization behavior graph.

      + *
    • *
    - *

    A member account can use the Detective API to do the following:

    + *

    An invited member account can use the Detective API to do the following:

    *
      *
    • *

      View the list of behavior graphs that they are invited to.

      @@ -98,7 +150,8 @@ import { DetectiveClient } from "./DetectiveClient"; * *

      We replaced the term "master account" with the term "administrator account." An * administrator account is used to centrally manage multiple accounts. In the case of - * Detective, the administrator account manages the accounts in their behavior graph.

      + * Detective, the administrator account manages the accounts in their behavior + * graph.

      *
      */ export class Detective extends DetectiveClient { @@ -139,15 +192,15 @@ export class Detective extends DetectiveClient { /** *

      Creates a new behavior graph for the calling account, and sets that account as the - * administrator account. This operation is called by the account that is enabling - * Detective.

      - *

      Before you try to enable Detective, make sure that your account has been enrolled in - * Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable - * Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable - * Detective, it checks whether your data volume is within the Detective quota. If it exceeds the - * quota, then you cannot enable Detective.

      - *

      The operation also enables Detective for the calling account in the currently selected - * Region. It returns the ARN of the new behavior graph.

      + * administrator account. This operation is called by the account that is enabling Detective.

      + *

      Before you try to enable Detective, make sure that your account has been + * enrolled in Amazon GuardDuty for at least 48 hours. If you do not meet this + * requirement, you cannot enable Detective. If you do meet the GuardDuty + * prerequisite, then when you make the request to enable Detective, it checks + * whether your data volume is within the Detective quota. If it exceeds the quota, + * then you cannot enable Detective.

      + *

      The operation also enables Detective for the calling account in the currently + * selected Region. It returns the ARN of the new behavior graph.

      *

      * CreateGraph triggers a process to create the corresponding data tables for * the new behavior graph.

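// A minimal sketch of the createGraph / createMembers flow described above.
// The method names come from this client; the GraphArn, Accounts, AccountId,
// and EmailAddress field names are assumptions used for illustration only.
import { Detective } from "@aws-sdk/client-detective";

async function enableDetectiveAndInviteMember(): Promise<void> {
  const detective = new Detective({ region: "us-east-1" });

  // Enabling Detective creates a new behavior graph and returns its ARN.
  const { GraphArn } = await detective.createGraph({});

  // Invite an account (or, for the organization behavior graph, enable an
  // organization account) as a member of the new behavior graph.
  await detective.createMembers({
    GraphArn,
    Accounts: [{ AccountId: "111122223333", EmailAddress: "member@example.com" }],
  });
}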
      @@ -179,21 +232,31 @@ export class Detective extends DetectiveClient { } /** - *

      Sends a request to invite the specified AWS accounts to be member accounts in the - * behavior graph. This operation can only be called by the administrator account for a - * behavior graph.

      + *

      + * CreateMembers is used to send invitations to accounts. For the organization + * behavior graph, the Detective administrator account uses + * CreateMembers to enable organization accounts as member accounts.

      + *

      For invited accounts, CreateMembers sends a request to invite the specified + * Amazon Web Services accounts to be member accounts in the behavior graph. This operation + * can only be called by the administrator account for a behavior graph.

      *

      * CreateMembers verifies the accounts and then invites the verified accounts. * The administrator can optionally specify to not send invitation emails to the member * accounts. This would be used when the administrator manages their member accounts * centrally.

      - *

      The request provides the behavior graph ARN and the list of accounts to invite.

      + *

      For organization accounts in the organization behavior graph, CreateMembers + * attempts to enable the accounts. The organization accounts do not receive + * invitations.

      + *

      The request provides the behavior graph ARN and the list of accounts to invite or to + * enable.

      *

      The response separates the requested accounts into two lists:

      *
        *
      • - *

        The accounts that CreateMembers was able to start the verification - * for. This list includes member accounts that are being verified, that have passed - * verification and are to be invited, and that have failed verification.

        + *

        The accounts that CreateMembers was able to process. For invited + * accounts, includes member accounts that are being verified, that have passed + * verification and are to be invited, and that have failed verification. For + * organization accounts in the organization behavior graph, includes accounts that can + * be enabled and that cannot be enabled.

        *
      • *
      • *

        The accounts that CreateMembers was unable to process. This list @@ -233,7 +296,7 @@ export class Detective extends DetectiveClient { /** *

        Disables the specified behavior graph and queues it to be deleted. This operation - * removes the graph from each member account's list of behavior graphs.

        + * removes the behavior graph from each member account's list of behavior graphs.

        *

        * DeleteGraph can only be called by the administrator account for a behavior * graph.

        @@ -262,11 +325,19 @@ export class Detective extends DetectiveClient { } /** - *

        Deletes one or more member accounts from the administrator account's behavior graph. - * This operation can only be called by a Detective administrator account. That account cannot use - * DeleteMembers to delete their own account from the behavior graph. To - * disable a behavior graph, the administrator account uses the DeleteGraph API - * method.

        + *

        Removes the specified member accounts from the behavior graph. The removed accounts no + * longer contribute data to the behavior graph. This operation can only be called by the + * administrator account for the behavior graph.

        + *

        For invited accounts, the removed accounts are deleted from the list of accounts in the + * behavior graph. To restore the account, the administrator account must send another + * invitation.

        + *

        For organization accounts in the organization behavior graph, the Detective + * administrator account can always enable the organization account again. Organization + * accounts that are not enabled as member accounts are not included in the + * ListMembers results for the organization behavior graph.

        + *

        An administrator account cannot use DeleteMembers to remove their own + * account from the behavior graph. To disable a behavior graph, the administrator account + * uses the DeleteGraph API method.

        */ public deleteMembers( args: DeleteMembersCommandInput, @@ -297,9 +368,85 @@ export class Detective extends DetectiveClient { } } + /** + *

        Returns information about the configuration for the organization behavior graph. + * Currently indicates whether to automatically enable new organization accounts as member + * accounts.

        + *

        Can only be called by the Detective administrator account for the organization.

        + */ + public describeOrganizationConfiguration( + args: DescribeOrganizationConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeOrganizationConfiguration( + args: DescribeOrganizationConfigurationCommandInput, + cb: (err: any, data?: DescribeOrganizationConfigurationCommandOutput) => void + ): void; + public describeOrganizationConfiguration( + args: DescribeOrganizationConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeOrganizationConfigurationCommandOutput) => void + ): void; + public describeOrganizationConfiguration( + args: DescribeOrganizationConfigurationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeOrganizationConfigurationCommandOutput) => void), + cb?: (err: any, data?: DescribeOrganizationConfigurationCommandOutput) => void + ): Promise | void { + const command = new DescribeOrganizationConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

        Removes the Detective administrator account for the organization in the current + * Region. Deletes the behavior graph for that account.

        + *

        Can only be called by the organization management account. Before you can select a + * different Detective administrator account, you must remove the Detective + * administrator account in all Regions.

        + */ + public disableOrganizationAdminAccount( + args: DisableOrganizationAdminAccountCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disableOrganizationAdminAccount( + args: DisableOrganizationAdminAccountCommandInput, + cb: (err: any, data?: DisableOrganizationAdminAccountCommandOutput) => void + ): void; + public disableOrganizationAdminAccount( + args: DisableOrganizationAdminAccountCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisableOrganizationAdminAccountCommandOutput) => void + ): void; + public disableOrganizationAdminAccount( + args: DisableOrganizationAdminAccountCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisableOrganizationAdminAccountCommandOutput) => void), + cb?: (err: any, data?: DisableOrganizationAdminAccountCommandOutput) => void + ): Promise | void { + const command = new DisableOrganizationAdminAccountCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

        Removes the member account from the specified behavior graph. This operation can only be - * called by a member account that has the ENABLED status.

        + * called by an invited member account that has the ENABLED status.

        + *

        + * DisassociateMembership cannot be called by an organization account in the + * organization behavior graph. For the organization behavior graph, the Detective + * administrator account determines which organization accounts to enable or disable as member + * accounts.

        */ public disassociateMembership( args: DisassociateMembershipCommandInput, @@ -330,6 +477,45 @@ export class Detective extends DetectiveClient { } } + /** + *

        Designates the Detective administrator account for the organization in the + * current Region.

        + *

        If the account does not have Detective enabled, then enables Detective + * for that account and creates a new behavior graph.

        + *

        Can only be called by the organization management account.

        + *

        The Detective administrator account for an organization must be the same in all + * Regions. If you already designated a Detective administrator account in another + * Region, then you must designate the same account.

        + */ + public enableOrganizationAdminAccount( + args: EnableOrganizationAdminAccountCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public enableOrganizationAdminAccount( + args: EnableOrganizationAdminAccountCommandInput, + cb: (err: any, data?: EnableOrganizationAdminAccountCommandOutput) => void + ): void; + public enableOrganizationAdminAccount( + args: EnableOrganizationAdminAccountCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: EnableOrganizationAdminAccountCommandOutput) => void + ): void; + public enableOrganizationAdminAccount( + args: EnableOrganizationAdminAccountCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: EnableOrganizationAdminAccountCommandOutput) => void), + cb?: (err: any, data?: EnableOrganizationAdminAccountCommandOutput) => void + ): Promise | void { + const command = new EnableOrganizationAdminAccountCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

        Returns the membership details for specified member accounts for a behavior * graph.

        @@ -388,7 +574,7 @@ export class Detective extends DetectiveClient { /** *

        Retrieves the list of open and accepted behavior graph invitations for the member - * account. This operation can only be called by a member account.

        + * account. This operation can only be called by an invited member account.

        *

        Open invitations are invitations that the member account has not responded to.

        *

        The results do not include behavior graphs for which the member account declined the * invitation. The results also do not include behavior graphs that the member account @@ -424,8 +610,12 @@ export class Detective extends DetectiveClient { } /** - *

        Retrieves the list of member accounts for a behavior graph. Does not return member - * accounts that were removed from the behavior graph.

        + *

        Retrieves the list of member accounts for a behavior graph.

        + *

        For invited accounts, the results do not include member accounts that were removed from + * the behavior graph.

        + *

        For the organization behavior graph, the results do not include organization accounts + * that the Detective administrator account has not enabled as member + * accounts.

        */ public listMembers(args: ListMembersCommandInput, options?: __HttpHandlerOptions): Promise; public listMembers(args: ListMembersCommandInput, cb: (err: any, data?: ListMembersCommandOutput) => void): void; @@ -450,6 +640,39 @@ export class Detective extends DetectiveClient { } } + /** + *

        Returns information about the Detective administrator account for an + * organization. Can only be called by the organization management account.

        + */ + public listOrganizationAdminAccounts( + args: ListOrganizationAdminAccountsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listOrganizationAdminAccounts( + args: ListOrganizationAdminAccountsCommandInput, + cb: (err: any, data?: ListOrganizationAdminAccountsCommandOutput) => void + ): void; + public listOrganizationAdminAccounts( + args: ListOrganizationAdminAccountsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListOrganizationAdminAccountsCommandOutput) => void + ): void; + public listOrganizationAdminAccounts( + args: ListOrganizationAdminAccountsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListOrganizationAdminAccountsCommandOutput) => void), + cb?: (err: any, data?: ListOrganizationAdminAccountsCommandOutput) => void + ): Promise | void { + const command = new ListOrganizationAdminAccountsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

        Returns the tag values that are assigned to a behavior graph.

        */ @@ -484,7 +707,12 @@ export class Detective extends DetectiveClient { /** *

        Rejects an invitation to contribute the account data to a behavior graph. This operation - * must be called by a member account that has the INVITED status.

        + * must be called by an invited member account that has the INVITED + * status.

        + *

        + * RejectInvitation cannot be called by an organization account in the + * organization behavior graph. In the organization behavior graph, organization accounts do + * not receive an invitation.

        */ public rejectInvitation( args: RejectInvitationCommandInput, @@ -522,7 +750,7 @@ export class Detective extends DetectiveClient { *
          *
        • *

          If Detective enabled the member account, then the new status is - * ENABLED.

          + * ENABLED.

          *
        • *
        • *

          If Detective cannot enable the member account, the status remains @@ -616,4 +844,38 @@ export class Detective extends DetectiveClient { return this.send(command, optionsOrCb); } } + + /** + *

          Updates the configuration for the Organizations integration in the current Region. + * Can only be called by the Detective administrator account for the + * organization.

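// A minimal sketch of reading and updating the organization configuration
// described above. The method names come from this client; the GraphArn and
// AutoEnable field names are assumptions used for illustration only.
import { Detective } from "@aws-sdk/client-detective";

async function autoEnableNewOrganizationAccounts(graphArn: string): Promise<void> {
  // Both calls can only be made by the Detective administrator account for the organization.
  const detective = new Detective({ region: "us-east-1" });

  const current = await detective.describeOrganizationConfiguration({ GraphArn: graphArn });
  console.log("Automatic enablement is currently:", current.AutoEnable);

  // Automatically enable new organization accounts as member accounts in the
  // organization behavior graph.
  await detective.updateOrganizationConfiguration({ GraphArn: graphArn, AutoEnable: true });
}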
          + */ + public updateOrganizationConfiguration( + args: UpdateOrganizationConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateOrganizationConfiguration( + args: UpdateOrganizationConfigurationCommandInput, + cb: (err: any, data?: UpdateOrganizationConfigurationCommandOutput) => void + ): void; + public updateOrganizationConfiguration( + args: UpdateOrganizationConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateOrganizationConfigurationCommandOutput) => void + ): void; + public updateOrganizationConfiguration( + args: UpdateOrganizationConfigurationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateOrganizationConfigurationCommandOutput) => void), + cb?: (err: any, data?: UpdateOrganizationConfigurationCommandOutput) => void + ): Promise | void { + const command = new UpdateOrganizationConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } } diff --git a/clients/client-detective/src/DetectiveClient.ts b/clients/client-detective/src/DetectiveClient.ts index d9458bbc8995..40ff24a08c0b 100644 --- a/clients/client-detective/src/DetectiveClient.ts +++ b/clients/client-detective/src/DetectiveClient.ts @@ -54,14 +54,30 @@ import { CreateGraphCommandInput, CreateGraphCommandOutput } from "./commands/Cr import { CreateMembersCommandInput, CreateMembersCommandOutput } from "./commands/CreateMembersCommand"; import { DeleteGraphCommandInput, DeleteGraphCommandOutput } from "./commands/DeleteGraphCommand"; import { DeleteMembersCommandInput, DeleteMembersCommandOutput } from "./commands/DeleteMembersCommand"; +import { + DescribeOrganizationConfigurationCommandInput, + DescribeOrganizationConfigurationCommandOutput, +} from "./commands/DescribeOrganizationConfigurationCommand"; +import { + DisableOrganizationAdminAccountCommandInput, + DisableOrganizationAdminAccountCommandOutput, +} from "./commands/DisableOrganizationAdminAccountCommand"; import { DisassociateMembershipCommandInput, DisassociateMembershipCommandOutput, } from "./commands/DisassociateMembershipCommand"; +import { + EnableOrganizationAdminAccountCommandInput, + EnableOrganizationAdminAccountCommandOutput, +} from "./commands/EnableOrganizationAdminAccountCommand"; import { GetMembersCommandInput, GetMembersCommandOutput } from "./commands/GetMembersCommand"; import { ListGraphsCommandInput, ListGraphsCommandOutput } from "./commands/ListGraphsCommand"; import { ListInvitationsCommandInput, ListInvitationsCommandOutput } from "./commands/ListInvitationsCommand"; import { ListMembersCommandInput, ListMembersCommandOutput } from "./commands/ListMembersCommand"; +import { + ListOrganizationAdminAccountsCommandInput, + ListOrganizationAdminAccountsCommandOutput, +} from "./commands/ListOrganizationAdminAccountsCommand"; import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, @@ -73,6 +89,10 @@ import { } from "./commands/StartMonitoringMemberCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { + UpdateOrganizationConfigurationCommandInput, + 
UpdateOrganizationConfigurationCommandOutput, +} from "./commands/UpdateOrganizationConfigurationCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = @@ -81,16 +101,21 @@ export type ServiceInputTypes = | CreateMembersCommandInput | DeleteGraphCommandInput | DeleteMembersCommandInput + | DescribeOrganizationConfigurationCommandInput + | DisableOrganizationAdminAccountCommandInput | DisassociateMembershipCommandInput + | EnableOrganizationAdminAccountCommandInput | GetMembersCommandInput | ListGraphsCommandInput | ListInvitationsCommandInput | ListMembersCommandInput + | ListOrganizationAdminAccountsCommandInput | ListTagsForResourceCommandInput | RejectInvitationCommandInput | StartMonitoringMemberCommandInput | TagResourceCommandInput - | UntagResourceCommandInput; + | UntagResourceCommandInput + | UpdateOrganizationConfigurationCommandInput; export type ServiceOutputTypes = | AcceptInvitationCommandOutput @@ -98,16 +123,21 @@ export type ServiceOutputTypes = | CreateMembersCommandOutput | DeleteGraphCommandOutput | DeleteMembersCommandOutput + | DescribeOrganizationConfigurationCommandOutput + | DisableOrganizationAdminAccountCommandOutput | DisassociateMembershipCommandOutput + | EnableOrganizationAdminAccountCommandOutput | GetMembersCommandOutput | ListGraphsCommandOutput | ListInvitationsCommandOutput | ListMembersCommandOutput + | ListOrganizationAdminAccountsCommandOutput | ListTagsForResourceCommandOutput | RejectInvitationCommandOutput | StartMonitoringMemberCommandOutput | TagResourceCommandOutput - | UntagResourceCommandOutput; + | UntagResourceCommandOutput + | UpdateOrganizationConfigurationCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** @@ -258,20 +288,32 @@ type DetectiveClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHan export interface DetectiveClientResolvedConfig extends DetectiveClientResolvedConfigType {} /** - *

          Detective uses machine learning and purpose-built visualizations to help you analyze and - * investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically - * extracts time-based events such as login attempts, API calls, and network traffic from - * AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by - * Amazon GuardDuty.

          - *

          The Detective API primarily supports the creation and management of behavior graphs. A - * behavior graph contains the extracted data from a set of member accounts, and is created - * and managed by an administrator account.

          - *

          Every behavior graph is specific to a Region. You can only use the API to manage graphs - * that belong to the Region that is associated with the currently selected endpoint.

          - *

          A Detective administrator account can use the Detective API to do the following:

          + *

Detective uses machine learning and purpose-built visualizations to help you to + * analyze and investigate security issues across your Amazon Web Services workloads. Detective automatically extracts time-based events such + * as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by + * Amazon GuardDuty.

          + *

          The Detective API primarily supports the creation and management of behavior + * graphs. A behavior graph contains the extracted data from a set of member accounts, and is + * created and managed by an administrator account.

          + *

          To add a member account to the behavior graph, the administrator account sends an + * invitation to the account. When the account accepts the invitation, it becomes a member + * account in the behavior graph.

          + *

          Detective is also integrated with Organizations. The organization + * management account designates the Detective administrator account for the + * organization. That account becomes the administrator account for the organization behavior + * graph. The Detective administrator account can enable any organization account as + * a member account in the organization behavior graph. The organization accounts do not + * receive invitations. The Detective administrator account can also invite other + * accounts to the organization behavior graph.

          + *

          Every behavior graph is specific to a Region. You can only use the API to manage + * behavior graphs that belong to the Region that is associated with the currently selected + * endpoint.

          + *

          The administrator account for a behavior graph can use the Detective API to do + * the following:

          *
            *
          • - *

            Enable and disable Detective. Enabling Detective creates a new behavior graph.

            + *

            Enable and disable Detective. Enabling Detective creates a new + * behavior graph.

            *
          • *
          • *

            View the list of member accounts in a behavior graph.

            @@ -282,8 +324,23 @@ export interface DetectiveClientResolvedConfig extends DetectiveClientResolvedCo *
          • *

            Remove member accounts from a behavior graph.

            *
          • + *
          • + *

            Apply tags to a behavior graph.

            + *
          • + *
          + *

          The organization management account can use the Detective API to select the + * delegated administrator for Detective.

          + *

          The Detective administrator account for an organization can use the Detective API to do the following:

          + *
            + *
          • + *

            Perform all of the functions of an administrator account.

            + *
          • + *
          • + *

            Determine whether to automatically enable new organization accounts as member + * accounts in the organization behavior graph.

            + *
          • *
          - *

          A member account can use the Detective API to do the following:

          + *

          An invited member account can use the Detective API to do the following:

          *
            *
          • *

            View the list of behavior graphs that they are invited to.

            @@ -302,7 +359,8 @@ export interface DetectiveClientResolvedConfig extends DetectiveClientResolvedCo * *

            We replaced the term "master account" with the term "administrator account." An * administrator account is used to centrally manage multiple accounts. In the case of - * Detective, the administrator account manages the accounts in their behavior graph.

            + * Detective, the administrator account manages the accounts in their behavior + * graph.

            *
            */ export class DetectiveClient extends __Client< diff --git a/clients/client-detective/src/commands/CreateGraphCommand.ts b/clients/client-detective/src/commands/CreateGraphCommand.ts index 7ad57a4f6ae6..a97bb134c6c7 100644 --- a/clients/client-detective/src/commands/CreateGraphCommand.ts +++ b/clients/client-detective/src/commands/CreateGraphCommand.ts @@ -23,15 +23,15 @@ export interface CreateGraphCommandOutput extends CreateGraphResponse, __Metadat /** *

            Creates a new behavior graph for the calling account, and sets that account as the - * administrator account. This operation is called by the account that is enabling - * Detective.

            - *

            Before you try to enable Detective, make sure that your account has been enrolled in - * Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable - * Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable - * Detective, it checks whether your data volume is within the Detective quota. If it exceeds the - * quota, then you cannot enable Detective.

            - *

            The operation also enables Detective for the calling account in the currently selected - * Region. It returns the ARN of the new behavior graph.

            + * administrator account. This operation is called by the account that is enabling Detective.

            + *

Before you try to enable Detective, make sure that your account has been
+ * enrolled in Amazon GuardDuty for at least 48 hours. If you do not meet this
+ * requirement, you cannot enable Detective. If you do meet the GuardDuty
+ * prerequisite, then when you make the request to enable Detective, it checks
+ * whether your data volume is within the Detective quota. If it exceeds the quota,
+ * then you cannot enable Detective.

            + *

            The operation also enables Detective for the calling account in the currently + * selected Region. It returns the ARN of the new behavior graph.

            *

            * CreateGraph triggers a process to create the corresponding data tables for * the new behavior graph.
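For reference, a minimal usage sketch of CreateGraphCommand (not part of this diff). The region, tag values, and the optional Tags map on CreateGraphRequest are illustrative assumptions; the response contains the ARN of the new behavior graph as described above.

```javascript
import { DetectiveClient, CreateGraphCommand } from "@aws-sdk/client-detective"; // ES Modules import

// Enables Detective for the calling account in the selected Region and
// returns the ARN of the new behavior graph.
const client = new DetectiveClient({ region: "us-east-1" });
const response = await client.send(
  new CreateGraphCommand({
    // Optional: tags to assign to the new behavior graph (up to 50).
    Tags: { Department: "Security" }, // placeholder tag
  })
);
console.log(response); // includes the ARN of the new behavior graph
```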

            diff --git a/clients/client-detective/src/commands/CreateMembersCommand.ts b/clients/client-detective/src/commands/CreateMembersCommand.ts index f27879fef8d9..b3e29a96e6a2 100644 --- a/clients/client-detective/src/commands/CreateMembersCommand.ts +++ b/clients/client-detective/src/commands/CreateMembersCommand.ts @@ -22,21 +22,31 @@ export interface CreateMembersCommandInput extends CreateMembersRequest {} export interface CreateMembersCommandOutput extends CreateMembersResponse, __MetadataBearer {} /** - *

            Sends a request to invite the specified AWS accounts to be member accounts in the - * behavior graph. This operation can only be called by the administrator account for a - * behavior graph.

            + *

            + * CreateMembers is used to send invitations to accounts. For the organization + * behavior graph, the Detective administrator account uses + * CreateMembers to enable organization accounts as member accounts.

            + *

            For invited accounts, CreateMembers sends a request to invite the specified + * Amazon Web Services accounts to be member accounts in the behavior graph. This operation + * can only be called by the administrator account for a behavior graph.

            *

* CreateMembers verifies the accounts and then invites the verified accounts.
* The administrator can optionally specify to not send invitation emails to the member
* accounts. This would be used when the administrator manages their member accounts
* centrally.

            - *

            The request provides the behavior graph ARN and the list of accounts to invite.

            + *

            For organization accounts in the organization behavior graph, CreateMembers + * attempts to enable the accounts. The organization accounts do not receive + * invitations.

            + *

            The request provides the behavior graph ARN and the list of accounts to invite or to + * enable.
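A sketch of the request described above, built from the CreateMembersRequest shape changed later in this diff (the graph ARN, account ID, and email address are placeholders):

```javascript
import { DetectiveClient, CreateMembersCommand } from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
const response = await client.send(
  new CreateMembersCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:123412341234", // placeholder
    Message: "Please join my behavior graph.", // optional invitation message
    DisableEmailNotification: false,
    Accounts: [
      { AccountId: "444455556666", EmailAddress: "member@example.com" }, // placeholders
    ],
  })
);
// The response separates the requested accounts into Members and UnprocessedAccounts.
console.log(response.Members, response.UnprocessedAccounts);
```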

            *

            The response separates the requested accounts into two lists:

            *
              *
            • - *

              The accounts that CreateMembers was able to start the verification - * for. This list includes member accounts that are being verified, that have passed - * verification and are to be invited, and that have failed verification.

              + *

The accounts that CreateMembers was able to process. For invited + * accounts, this list includes member accounts that are being verified, that have passed + * verification and are to be invited, and that have failed verification. For + * organization accounts in the organization behavior graph, it includes accounts that can + * be enabled and accounts that cannot be enabled.

              *
            • *
            • *

              The accounts that CreateMembers was unable to process. This list diff --git a/clients/client-detective/src/commands/DeleteGraphCommand.ts b/clients/client-detective/src/commands/DeleteGraphCommand.ts index 876ff9599f1e..c7156eb880ec 100644 --- a/clients/client-detective/src/commands/DeleteGraphCommand.ts +++ b/clients/client-detective/src/commands/DeleteGraphCommand.ts @@ -23,7 +23,7 @@ export interface DeleteGraphCommandOutput extends __MetadataBearer {} /** *

              Disables the specified behavior graph and queues it to be deleted. This operation - * removes the graph from each member account's list of behavior graphs.

              + * removes the behavior graph from each member account's list of behavior graphs.

              *

              * DeleteGraph can only be called by the administrator account for a behavior * graph.

              diff --git a/clients/client-detective/src/commands/DeleteMembersCommand.ts b/clients/client-detective/src/commands/DeleteMembersCommand.ts index d8eb39c60214..435bd54d789d 100644 --- a/clients/client-detective/src/commands/DeleteMembersCommand.ts +++ b/clients/client-detective/src/commands/DeleteMembersCommand.ts @@ -22,11 +22,19 @@ export interface DeleteMembersCommandInput extends DeleteMembersRequest {} export interface DeleteMembersCommandOutput extends DeleteMembersResponse, __MetadataBearer {} /** - *

              Deletes one or more member accounts from the administrator account's behavior graph. - * This operation can only be called by a Detective administrator account. That account cannot use - * DeleteMembers to delete their own account from the behavior graph. To - * disable a behavior graph, the administrator account uses the DeleteGraph API - * method.

              + *

              Removes the specified member accounts from the behavior graph. The removed accounts no + * longer contribute data to the behavior graph. This operation can only be called by the + * administrator account for the behavior graph.

              + *

              For invited accounts, the removed accounts are deleted from the list of accounts in the + * behavior graph. To restore the account, the administrator account must send another + * invitation.

              + *

For organization accounts in the organization behavior graph, the Detective
+ * administrator account can always enable the organization account again. Organization
+ * accounts that are not enabled as member accounts are not included in the
+ * ListMembers results for the organization behavior graph.

              + *

              An administrator account cannot use DeleteMembers to remove their own + * account from the behavior graph. To disable a behavior graph, the administrator account + * uses the DeleteGraph API method.
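A minimal sketch of removing member accounts with the updated DeleteMembersCommand, using the DeleteMembersRequest fields shown later in this diff (ARN and account ID are placeholders):

```javascript
import { DetectiveClient, DeleteMembersCommand } from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
const response = await client.send(
  new DeleteMembersCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:123412341234", // placeholder
    AccountIds: ["444455556666"], // up to 50 member accounts per call
  })
);
// AccountIds lists the accounts that were removed; UnprocessedAccounts explains any failures.
console.log(response.AccountIds, response.UnprocessedAccounts);
```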

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-detective/src/commands/DescribeOrganizationConfigurationCommand.ts b/clients/client-detective/src/commands/DescribeOrganizationConfigurationCommand.ts new file mode 100644 index 000000000000..df1cd697c988 --- /dev/null +++ b/clients/client-detective/src/commands/DescribeOrganizationConfigurationCommand.ts @@ -0,0 +1,109 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DetectiveClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DetectiveClient"; +import { + DescribeOrganizationConfigurationRequest, + DescribeOrganizationConfigurationResponse, +} from "../models/models_0"; +import { + deserializeAws_restJson1DescribeOrganizationConfigurationCommand, + serializeAws_restJson1DescribeOrganizationConfigurationCommand, +} from "../protocols/Aws_restJson1"; + +export interface DescribeOrganizationConfigurationCommandInput extends DescribeOrganizationConfigurationRequest {} +export interface DescribeOrganizationConfigurationCommandOutput + extends DescribeOrganizationConfigurationResponse, + __MetadataBearer {} + +/** + *

              Returns information about the configuration for the organization behavior graph. + * Currently indicates whether to automatically enable new organization accounts as member + * accounts.

              + *

              Can only be called by the Detective administrator account for the organization.
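A minimal sketch of the new DescribeOrganizationConfigurationCommand (run as the Detective administrator account; the organization behavior graph ARN is a placeholder):

```javascript
import {
  DetectiveClient,
  DescribeOrganizationConfigurationCommand,
} from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
const { AutoEnable } = await client.send(
  new DescribeOrganizationConfigurationCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:123412341234", // placeholder
  })
);
// AutoEnable indicates whether new organization accounts are enabled automatically.
console.log(AutoEnable);
```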

              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DetectiveClient, DescribeOrganizationConfigurationCommand } from "@aws-sdk/client-detective"; // ES Modules import + * // const { DetectiveClient, DescribeOrganizationConfigurationCommand } = require("@aws-sdk/client-detective"); // CommonJS import + * const client = new DetectiveClient(config); + * const command = new DescribeOrganizationConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeOrganizationConfigurationCommandInput} for command's `input` shape. + * @see {@link DescribeOrganizationConfigurationCommandOutput} for command's `response` shape. + * @see {@link DetectiveClientResolvedConfig | config} for DetectiveClient's `config` shape. + * + */ +export class DescribeOrganizationConfigurationCommand extends $Command< + DescribeOrganizationConfigurationCommandInput, + DescribeOrganizationConfigurationCommandOutput, + DetectiveClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeOrganizationConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DetectiveClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DetectiveClient"; + const commandName = "DescribeOrganizationConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeOrganizationConfigurationRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeOrganizationConfigurationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DescribeOrganizationConfigurationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1DescribeOrganizationConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1DescribeOrganizationConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-detective/src/commands/DisableOrganizationAdminAccountCommand.ts b/clients/client-detective/src/commands/DisableOrganizationAdminAccountCommand.ts new file mode 100644 index 000000000000..f9e864802909 --- /dev/null +++ b/clients/client-detective/src/commands/DisableOrganizationAdminAccountCommand.ts @@ -0,0 +1,104 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext 
as __SerdeContext, +} from "@aws-sdk/types"; + +import { DetectiveClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DetectiveClient"; +import { + deserializeAws_restJson1DisableOrganizationAdminAccountCommand, + serializeAws_restJson1DisableOrganizationAdminAccountCommand, +} from "../protocols/Aws_restJson1"; + +export interface DisableOrganizationAdminAccountCommandInput {} +export interface DisableOrganizationAdminAccountCommandOutput extends __MetadataBearer {} + +/** + *

              Removes the Detective administrator account for the organization in the current + * Region. Deletes the behavior graph for that account.

              + *

              Can only be called by the organization management account. Before you can select a + * different Detective administrator account, you must remove the Detective + * administrator account in all Regions.
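A minimal sketch of the new DisableOrganizationAdminAccountCommand (run as the organization management account; the command's input shape is empty):

```javascript
import {
  DetectiveClient,
  DisableOrganizationAdminAccountCommand,
} from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
// Removes the Detective administrator account for the organization in this Region.
await client.send(new DisableOrganizationAdminAccountCommand({}));
```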

              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DetectiveClient, DisableOrganizationAdminAccountCommand } from "@aws-sdk/client-detective"; // ES Modules import + * // const { DetectiveClient, DisableOrganizationAdminAccountCommand } = require("@aws-sdk/client-detective"); // CommonJS import + * const client = new DetectiveClient(config); + * const command = new DisableOrganizationAdminAccountCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisableOrganizationAdminAccountCommandInput} for command's `input` shape. + * @see {@link DisableOrganizationAdminAccountCommandOutput} for command's `response` shape. + * @see {@link DetectiveClientResolvedConfig | config} for DetectiveClient's `config` shape. + * + */ +export class DisableOrganizationAdminAccountCommand extends $Command< + DisableOrganizationAdminAccountCommandInput, + DisableOrganizationAdminAccountCommandOutput, + DetectiveClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisableOrganizationAdminAccountCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DetectiveClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DetectiveClient"; + const commandName = "DisableOrganizationAdminAccountCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: (input: any) => input, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DisableOrganizationAdminAccountCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1DisableOrganizationAdminAccountCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1DisableOrganizationAdminAccountCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-detective/src/commands/DisassociateMembershipCommand.ts b/clients/client-detective/src/commands/DisassociateMembershipCommand.ts index ad0c313793c6..b68329496aca 100644 --- a/clients/client-detective/src/commands/DisassociateMembershipCommand.ts +++ b/clients/client-detective/src/commands/DisassociateMembershipCommand.ts @@ -23,7 +23,12 @@ export interface DisassociateMembershipCommandOutput extends __MetadataBearer {} /** *

              Removes the member account from the specified behavior graph. This operation can only be - * called by a member account that has the ENABLED status.

              + * called by an invited member account that has the ENABLED status.

              + *

              + * DisassociateMembership cannot be called by an organization account in the + * organization behavior graph. For the organization behavior graph, the Detective + * administrator account determines which organization accounts to enable or disable as member + * accounts.
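A minimal sketch for an invited member account resigning from a behavior graph (the graph ARN is a placeholder):

```javascript
import { DetectiveClient, DisassociateMembershipCommand } from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
// Must be called by an invited member account with the ENABLED status.
await client.send(
  new DisassociateMembershipCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:123412341234", // placeholder
  })
);
```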

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-detective/src/commands/EnableOrganizationAdminAccountCommand.ts b/clients/client-detective/src/commands/EnableOrganizationAdminAccountCommand.ts new file mode 100644 index 000000000000..4f5870ca0951 --- /dev/null +++ b/clients/client-detective/src/commands/EnableOrganizationAdminAccountCommand.ts @@ -0,0 +1,108 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DetectiveClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DetectiveClient"; +import { EnableOrganizationAdminAccountRequest } from "../models/models_0"; +import { + deserializeAws_restJson1EnableOrganizationAdminAccountCommand, + serializeAws_restJson1EnableOrganizationAdminAccountCommand, +} from "../protocols/Aws_restJson1"; + +export interface EnableOrganizationAdminAccountCommandInput extends EnableOrganizationAdminAccountRequest {} +export interface EnableOrganizationAdminAccountCommandOutput extends __MetadataBearer {} + +/** + *

              Designates the Detective administrator account for the organization in the + * current Region.

              + *

              If the account does not have Detective enabled, then enables Detective + * for that account and creates a new behavior graph.

              + *

              Can only be called by the organization management account.

              + *

              The Detective administrator account for an organization must be the same in all + * Regions. If you already designated a Detective administrator account in another + * Region, then you must designate the same account.
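A minimal sketch of the new EnableOrganizationAdminAccountCommand (run as the organization management account; the account ID is a placeholder):

```javascript
import {
  DetectiveClient,
  EnableOrganizationAdminAccountCommand,
} from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
// Designates the Detective administrator account for the organization in this Region.
await client.send(
  new EnableOrganizationAdminAccountCommand({ AccountId: "111122223333" }) // placeholder account ID
);
```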

              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DetectiveClient, EnableOrganizationAdminAccountCommand } from "@aws-sdk/client-detective"; // ES Modules import + * // const { DetectiveClient, EnableOrganizationAdminAccountCommand } = require("@aws-sdk/client-detective"); // CommonJS import + * const client = new DetectiveClient(config); + * const command = new EnableOrganizationAdminAccountCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link EnableOrganizationAdminAccountCommandInput} for command's `input` shape. + * @see {@link EnableOrganizationAdminAccountCommandOutput} for command's `response` shape. + * @see {@link DetectiveClientResolvedConfig | config} for DetectiveClient's `config` shape. + * + */ +export class EnableOrganizationAdminAccountCommand extends $Command< + EnableOrganizationAdminAccountCommandInput, + EnableOrganizationAdminAccountCommandOutput, + DetectiveClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: EnableOrganizationAdminAccountCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DetectiveClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DetectiveClient"; + const commandName = "EnableOrganizationAdminAccountCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: EnableOrganizationAdminAccountRequest.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: EnableOrganizationAdminAccountCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1EnableOrganizationAdminAccountCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1EnableOrganizationAdminAccountCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-detective/src/commands/ListInvitationsCommand.ts b/clients/client-detective/src/commands/ListInvitationsCommand.ts index 93dfa9d263e0..7b1ece55225f 100644 --- a/clients/client-detective/src/commands/ListInvitationsCommand.ts +++ b/clients/client-detective/src/commands/ListInvitationsCommand.ts @@ -23,7 +23,7 @@ export interface ListInvitationsCommandOutput extends ListInvitationsResponse, _ /** *

              Retrieves the list of open and accepted behavior graph invitations for the member - * account. This operation can only be called by a member account.

              + * account. This operation can only be called by an invited member account.

              *

              Open invitations are invitations that the member account has not responded to.

              *

              The results do not include behavior graphs for which the member account declined the * invitation. The results also do not include behavior graphs that the member account diff --git a/clients/client-detective/src/commands/ListMembersCommand.ts b/clients/client-detective/src/commands/ListMembersCommand.ts index 445b1c85d23a..62a5bdf599df 100644 --- a/clients/client-detective/src/commands/ListMembersCommand.ts +++ b/clients/client-detective/src/commands/ListMembersCommand.ts @@ -22,8 +22,12 @@ export interface ListMembersCommandInput extends ListMembersRequest {} export interface ListMembersCommandOutput extends ListMembersResponse, __MetadataBearer {} /** - *

              Retrieves the list of member accounts for a behavior graph. Does not return member - * accounts that were removed from the behavior graph.

              + *

              Retrieves the list of member accounts for a behavior graph.

              + *

              For invited accounts, the results do not include member accounts that were removed from + * the behavior graph.

              + *

              For the organization behavior graph, the results do not include organization accounts + * that the Detective administrator account has not enabled as member + * accounts.
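A minimal ListMembersCommand sketch (not part of this diff). The graph ARN is a placeholder, and it assumes the existing ListMembersRequest takes the behavior graph ARN as GraphArn:

```javascript
import { DetectiveClient, ListMembersCommand } from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
const { MemberDetails = [] } = await client.send(
  new ListMembersCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:123412341234", // placeholder
  })
);
for (const member of MemberDetails) {
  console.log(member.AccountId, member.Status);
}
```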

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-detective/src/commands/ListOrganizationAdminAccountsCommand.ts b/clients/client-detective/src/commands/ListOrganizationAdminAccountsCommand.ts new file mode 100644 index 000000000000..59b44999d81b --- /dev/null +++ b/clients/client-detective/src/commands/ListOrganizationAdminAccountsCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DetectiveClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DetectiveClient"; +import { ListOrganizationAdminAccountsRequest, ListOrganizationAdminAccountsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListOrganizationAdminAccountsCommand, + serializeAws_restJson1ListOrganizationAdminAccountsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListOrganizationAdminAccountsCommandInput extends ListOrganizationAdminAccountsRequest {} +export interface ListOrganizationAdminAccountsCommandOutput + extends ListOrganizationAdminAccountsResponse, + __MetadataBearer {} + +/** + *

              Returns information about the Detective administrator account for an + * organization. Can only be called by the organization management account.
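A minimal sketch that lists the delegated administrator accounts using the paginateListOrganizationAdminAccounts helper added later in this change (assuming the paginator is re-exported from the package root, as the other Detective paginators are):

```javascript
import { DetectiveClient, paginateListOrganizationAdminAccounts } from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
// Iterates every page of delegated administrator accounts for the organization.
for await (const page of paginateListOrganizationAdminAccounts({ client }, {})) {
  for (const admin of page.Administrators ?? []) {
    console.log(admin.AccountId, admin.GraphArn, admin.DelegationTime);
  }
}
```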

              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DetectiveClient, ListOrganizationAdminAccountsCommand } from "@aws-sdk/client-detective"; // ES Modules import + * // const { DetectiveClient, ListOrganizationAdminAccountsCommand } = require("@aws-sdk/client-detective"); // CommonJS import + * const client = new DetectiveClient(config); + * const command = new ListOrganizationAdminAccountsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListOrganizationAdminAccountsCommandInput} for command's `input` shape. + * @see {@link ListOrganizationAdminAccountsCommandOutput} for command's `response` shape. + * @see {@link DetectiveClientResolvedConfig | config} for DetectiveClient's `config` shape. + * + */ +export class ListOrganizationAdminAccountsCommand extends $Command< + ListOrganizationAdminAccountsCommandInput, + ListOrganizationAdminAccountsCommandOutput, + DetectiveClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListOrganizationAdminAccountsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DetectiveClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DetectiveClient"; + const commandName = "ListOrganizationAdminAccountsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListOrganizationAdminAccountsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListOrganizationAdminAccountsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListOrganizationAdminAccountsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListOrganizationAdminAccountsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1ListOrganizationAdminAccountsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-detective/src/commands/RejectInvitationCommand.ts b/clients/client-detective/src/commands/RejectInvitationCommand.ts index 42597ef0e82a..b66b645a56c9 100644 --- a/clients/client-detective/src/commands/RejectInvitationCommand.ts +++ b/clients/client-detective/src/commands/RejectInvitationCommand.ts @@ -23,7 +23,12 @@ export interface RejectInvitationCommandOutput extends __MetadataBearer {} /** *

              Rejects an invitation to contribute the account data to a behavior graph. This operation - * must be called by a member account that has the INVITED status.

              + * must be called by an invited member account that has the INVITED + * status.

              + *

              + * RejectInvitation cannot be called by an organization account in the + * organization behavior graph. In the organization behavior graph, organization accounts do + * not receive an invitation.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-detective/src/commands/StartMonitoringMemberCommand.ts b/clients/client-detective/src/commands/StartMonitoringMemberCommand.ts index 908dba842455..4965afa1e8f8 100644 --- a/clients/client-detective/src/commands/StartMonitoringMemberCommand.ts +++ b/clients/client-detective/src/commands/StartMonitoringMemberCommand.ts @@ -28,7 +28,7 @@ export interface StartMonitoringMemberCommandOutput extends __MetadataBearer {} *
                *
              • *

                If Detective enabled the member account, then the new status is - * ENABLED.

                + * ENABLED.

                *
              • *
              • *

                If Detective cannot enable the member account, the status remains diff --git a/clients/client-detective/src/commands/UpdateOrganizationConfigurationCommand.ts b/clients/client-detective/src/commands/UpdateOrganizationConfigurationCommand.ts new file mode 100644 index 000000000000..2528c29fc0c4 --- /dev/null +++ b/clients/client-detective/src/commands/UpdateOrganizationConfigurationCommand.ts @@ -0,0 +1,103 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DetectiveClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DetectiveClient"; +import { UpdateOrganizationConfigurationRequest } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateOrganizationConfigurationCommand, + serializeAws_restJson1UpdateOrganizationConfigurationCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateOrganizationConfigurationCommandInput extends UpdateOrganizationConfigurationRequest {} +export interface UpdateOrganizationConfigurationCommandOutput extends __MetadataBearer {} + +/** + *

                Updates the configuration for the Organizations integration in the current Region. + * Can only be called by the Detective administrator account for the + * organization.
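A minimal sketch that turns on auto-enrollment of new organization accounts, using the UpdateOrganizationConfigurationRequest fields added later in this diff (the graph ARN is a placeholder):

```javascript
import {
  DetectiveClient,
  UpdateOrganizationConfigurationCommand,
} from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
await client.send(
  new UpdateOrganizationConfigurationCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:123412341234", // placeholder
    AutoEnable: true, // automatically enable new organization accounts as member accounts
  })
);
```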

                + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DetectiveClient, UpdateOrganizationConfigurationCommand } from "@aws-sdk/client-detective"; // ES Modules import + * // const { DetectiveClient, UpdateOrganizationConfigurationCommand } = require("@aws-sdk/client-detective"); // CommonJS import + * const client = new DetectiveClient(config); + * const command = new UpdateOrganizationConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateOrganizationConfigurationCommandInput} for command's `input` shape. + * @see {@link UpdateOrganizationConfigurationCommandOutput} for command's `response` shape. + * @see {@link DetectiveClientResolvedConfig | config} for DetectiveClient's `config` shape. + * + */ +export class UpdateOrganizationConfigurationCommand extends $Command< + UpdateOrganizationConfigurationCommandInput, + UpdateOrganizationConfigurationCommandOutput, + DetectiveClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateOrganizationConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DetectiveClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DetectiveClient"; + const commandName = "UpdateOrganizationConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateOrganizationConfigurationRequest.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: UpdateOrganizationConfigurationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateOrganizationConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1UpdateOrganizationConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-detective/src/commands/index.ts b/clients/client-detective/src/commands/index.ts index 263665ddccc8..11dda1979923 100644 --- a/clients/client-detective/src/commands/index.ts +++ b/clients/client-detective/src/commands/index.ts @@ -3,13 +3,18 @@ export * from "./CreateGraphCommand"; export * from "./CreateMembersCommand"; export * from "./DeleteGraphCommand"; export * from "./DeleteMembersCommand"; +export * from "./DescribeOrganizationConfigurationCommand"; +export * from "./DisableOrganizationAdminAccountCommand"; export * from "./DisassociateMembershipCommand"; +export * from "./EnableOrganizationAdminAccountCommand"; export * from "./GetMembersCommand"; export * from "./ListGraphsCommand"; export * from "./ListInvitationsCommand"; export * from "./ListMembersCommand"; +export * from 
"./ListOrganizationAdminAccountsCommand"; export * from "./ListTagsForResourceCommand"; export * from "./RejectInvitationCommand"; export * from "./StartMonitoringMemberCommand"; export * from "./TagResourceCommand"; export * from "./UntagResourceCommand"; +export * from "./UpdateOrganizationConfigurationCommand"; diff --git a/clients/client-detective/src/models/models_0.ts b/clients/client-detective/src/models/models_0.ts index f7ccb5a274ca..9efa969b23b8 100644 --- a/clients/client-detective/src/models/models_0.ts +++ b/clients/client-detective/src/models/models_0.ts @@ -91,17 +91,18 @@ export namespace ValidationException { } /** - *

                An AWS account that is the administrator account of or a member of a behavior - * graph.

                + *

                An Amazon Web Services account that is the administrator account of or a member of a + * behavior graph.

                */ export interface Account { /** - *

                The account identifier of the AWS account.

                + *

                The account identifier of the Amazon Web Services account.

                */ AccountId: string | undefined; /** - *

                The AWS account root user email address for the AWS account.

                + *

                The Amazon Web Services account root user email address for the Amazon Web Services + * account.

                */ EmailAddress: string | undefined; } @@ -115,6 +116,39 @@ export namespace Account { }); } +/** + *

                Information about the Detective administrator account for an + * organization.

                + */ +export interface Administrator { + /** + *

                The Amazon Web Services account identifier of the Detective administrator + * account for the organization.

                + */ + AccountId?: string; + + /** + *

                The ARN of the organization behavior graph.

                + */ + GraphArn?: string; + + /** + *

                The date and time when the Detective administrator account was enabled. The + * value is an ISO8601 formatted string. For example, + * 2021-08-18T16:35:56.284Z.

                + */ + DelegationTime?: Date; +} + +export namespace Administrator { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Administrator): any => ({ + ...obj, + }); +} + export interface CreateGraphRequest { /** *

                The tags to assign to the new behavior graph. You can add up to 50 tags. For each tag, @@ -154,7 +188,7 @@ export namespace CreateGraphResponse { *

                  *
                • *

                  The request would cause the number of member accounts in the behavior graph to - * exceed the maximum allowed. A behavior graph cannot have more than 1000 member + * exceed the maximum allowed. A behavior graph cannot have more than 1200 member * accounts.

                  *
                • *
                • @@ -162,8 +196,8 @@ export namespace CreateGraphResponse { * allowed.

                  *
                • *
                • - *

                  Detective is unable to verify the data rate for the member account. This is usually - * because the member account is not enrolled in Amazon GuardDuty.

                  + *

                  Detective is unable to verify the data rate for the member account. This + * is usually because the member account is not enrolled in Amazon GuardDuty.

                  *
                • *
                */ @@ -184,8 +218,7 @@ export namespace ServiceQuotaExceededException { export interface CreateMembersRequest { /** - *

                The ARN of the behavior graph to invite the member accounts to contribute their data - * to.

                + *

                The ARN of the behavior graph.

                */ GraphArn: string | undefined; @@ -196,16 +229,20 @@ export interface CreateMembersRequest { Message?: string; /** - *

                if set to true, then the member accounts do not receive email - * notifications. By default, this is set to false, and the member accounts + *

If set to true, then the invited accounts do not receive email + * notifications. By default, this is set to false, and the invited accounts * receive email notifications.

                + *

                Organization accounts in the organization behavior graph do not receive email + * notifications.

                */ DisableEmailNotification?: boolean; /** - *

                The list of AWS accounts to invite to become member accounts in the behavior graph. - * You can invite up to 50 accounts at a time. For each invited account, the account list - * contains the account identifier and the AWS account root user email address.

                + *

                The list of Amazon Web Services accounts to invite or to enable. You can invite or enable + * up to 50 accounts at a time. For each invited account, the account list contains the + * account identifier and the Amazon Web Services account root user email address. For + * organization accounts in the organization behavior graph, the email address is not + * required.

                */ Accounts: Account[] | undefined; } @@ -224,6 +261,11 @@ export enum MemberDisabledReason { VOLUME_UNKNOWN = "VOLUME_UNKNOWN", } +export enum InvitationType { + INVITATION = "INVITATION", + ORGANIZATION = "ORGANIZATION", +} + export enum MemberStatus { ACCEPTED_BUT_DISABLED = "ACCEPTED_BUT_DISABLED", ENABLED = "ENABLED", @@ -233,34 +275,35 @@ export enum MemberStatus { } /** - *

                Details about a member account that was invited to contribute to a behavior - * graph.

                + *

                Details about a member account in a behavior graph.

                */ export interface MemberDetail { /** - *

                The AWS account identifier for the member account.

                + *

                The Amazon Web Services account identifier for the member account.

                */ AccountId?: string; /** - *

                The AWS account root user email address for the member account.

                + *

                The Amazon Web Services account root user email address for the member account.

                */ EmailAddress?: string; /** - *

                The ARN of the behavior graph that the member account was invited to.

                + *

                The ARN of the behavior graph.

                */ GraphArn?: string; /** * @deprecated * - *

                The AWS account identifier of the administrator account for the behavior graph.

                + *

                The Amazon Web Services account identifier of the administrator account for the behavior + * graph.

                */ MasterId?: string; /** - *

                The AWS account identifier of the administrator account for the behavior graph.

                + *

                The Amazon Web Services account identifier of the administrator account for the behavior + * graph.

                */ AdministratorId?: string; @@ -270,37 +313,43 @@ export interface MemberDetail { *
                  *
                • *

                  - * INVITED - Indicates that the member was sent an invitation but has - * not yet responded.

                  + * INVITED - For invited accounts only. Indicates that the member was + * sent an invitation but has not yet responded.

                  *
                • *
                • *

                  - * VERIFICATION_IN_PROGRESS - Indicates that Detective is verifying that the - * account identifier and email address provided for the member account match. If they - * do match, then Detective sends the invitation. If the email address and account - * identifier don't match, then the member cannot be added to the behavior graph.

                  + * VERIFICATION_IN_PROGRESS - For invited accounts only, indicates that + * Detective is verifying that the account identifier and email address + * provided for the member account match. If they do match, then Detective + * sends the invitation. If the email address and account identifier don't match, then + * the member cannot be added to the behavior graph.

                  + *

                  For organization accounts in the organization behavior graph, indicates that + * Detective is verifying that the account belongs to the + * organization.

                  *
                • *
                • *

                  - * VERIFICATION_FAILED - Indicates that the account and email address - * provided for the member account do not match, and Detective did not send an invitation to - * the account.

                  + * VERIFICATION_FAILED - For invited accounts only. Indicates that the + * account and email address provided for the member account do not match, and Detective did not send an invitation to the account.

                  *
                • *
                • *

                  - * ENABLED - Indicates that the member account accepted the invitation - * to contribute to the behavior graph.

                  + * ENABLED - Indicates that the member account currently contributes + * data to the behavior graph. For invited accounts, the member account accepted the + * invitation. For organization accounts in the organization behavior graph, the Detective administrator account enabled the organization account as a member + * account.

                  *
                • *
                • *

                  - * ACCEPTED_BUT_DISABLED - Indicates that the member account accepted - * the invitation but is prevented from contributing data to the behavior graph. - * DisabledReason provides the reason why the member account is not - * enabled.

                  + * ACCEPTED_BUT_DISABLED - The account accepted the invitation, or was + * enabled by the Detective administrator account, but is prevented from + * contributing data to the behavior graph. DisabledReason provides the + * reason why the member account is not enabled.

                  *
                • *
                - *

                Member accounts that declined an invitation or that were removed from the behavior graph - * are not included.

                + *

                Invited accounts that declined an invitation or that were removed from the behavior + * graph are not included. In the organization behavior graph, organization accounts that the + * Detective administrator account did not enable are not included.

                */ Status?: MemberStatus | string; @@ -316,23 +365,24 @@ export interface MemberDetail { *
              • *
              • *

                - * VOLUME_UNKNOWN - Indicates that Detective is unable to verify the data - * volume for the member account. This is usually because the member account is not - * enrolled in Amazon GuardDuty.

                + * VOLUME_UNKNOWN - Indicates that Detective is unable to + * verify the data volume for the member account. This is usually because the member + * account is not enrolled in Amazon GuardDuty.

                *
              • *
              */ DisabledReason?: MemberDisabledReason | string; /** - *

              The date and time that Detective sent the invitation to the member account. The value is in - * milliseconds since the epoch.

              + *

              For invited accounts, the date and time that Detective sent the invitation to + * the account. The value is an ISO8601 formatted string. For example, + * 2021-08-18T16:35:56.284Z.

              */ InvitedTime?: Date; /** - *

              The date and time that the member account was last updated. The value is in milliseconds - * since the epoch.

              + *

              The date and time that the member account was last updated. The value is an ISO8601 + * formatted string. For example, 2021-08-18T16:35:56.284Z.

              */ UpdatedTime?: Date; @@ -342,7 +392,8 @@ export interface MemberDetail { VolumeUsageInBytes?: number; /** - *

              The data and time when the member account data volume was last updated.

              + *

The date and time when the member account data volume was last updated. The value is an + * ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

              */ VolumeUsageUpdatedTime?: Date; @@ -362,9 +413,19 @@ export interface MemberDetail { /** * @deprecated * - *

              The date and time when the graph utilization percentage was last updated.

              + *

              The date and time when the graph utilization percentage was last updated. The value is + * an ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

              */ PercentOfGraphUtilizationUpdatedTime?: Date; + + /** + *

              The type of behavior graph membership.

              + *

              For an organization account in the organization behavior graph, the type is + * ORGANIZATION.

              + *

              For an account that was invited to a behavior graph, the type is + * INVITATION.
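For illustration, a sketch that splits ListMembers results by the new InvitationType field (not part of this diff; the graph ARN is a placeholder and the ListMembers request shape is assumed as above):

```javascript
import { DetectiveClient, InvitationType, ListMembersCommand } from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
const { MemberDetails = [] } = await client.send(
  new ListMembersCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:123412341234", // placeholder
  })
);
// Organization accounts enabled by the Detective administrator vs. invited accounts.
const organizationMembers = MemberDetails.filter((m) => m.InvitationType === InvitationType.ORGANIZATION);
const invitedMembers = MemberDetails.filter((m) => m.InvitationType === InvitationType.INVITATION);
console.log(organizationMembers.length, invitedMembers.length);
```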

              + */ + InvitationType?: InvitationType | string; } export namespace MemberDetail { @@ -382,7 +443,8 @@ export namespace MemberDetail { */ export interface UnprocessedAccount { /** - *

              The AWS account identifier of the member account that was not processed.

              + *

              The Amazon Web Services account identifier of the member account that was not + * processed.

              */ AccountId?: string; @@ -403,16 +465,17 @@ export namespace UnprocessedAccount { export interface CreateMembersResponse { /** - *

              The set of member account invitation requests that Detective was able to process. This - * includes accounts that are being verified, that failed verification, and that passed - * verification and are being sent an invitation.

              + *

              The set of member account invitation or enablement requests that Detective was + * able to process. This includes accounts that are being verified, that failed verification, + * and that passed verification and are being sent an invitation or are being enabled.

              */ Members?: MemberDetail[]; /** - *

              The list of accounts for which Detective was unable to process the invitation request. For - * each account, the list provides the reason why the request could not be processed. The list - * includes accounts that are already member accounts in the behavior graph.

              + *

              The list of accounts for which Detective was unable to process the invitation + * or enablement request. For each account, the list provides the reason why the request could + * not be processed. The list includes accounts that are already member accounts in the + * behavior graph.

              */ UnprocessedAccounts?: UnprocessedAccount[]; } @@ -444,13 +507,13 @@ export namespace DeleteGraphRequest { export interface DeleteMembersRequest { /** - *

              The ARN of the behavior graph to delete members from.

              + *

              The ARN of the behavior graph to remove members from.

              */ GraphArn: string | undefined; /** - *

              The list of AWS account identifiers for the member accounts to delete from the - * behavior graph. You can delete up to 50 member accounts at a time.

              + *

              The list of Amazon Web Services account identifiers for the member accounts to remove + * from the behavior graph. You can remove up to 50 member accounts at a time.

              */ AccountIds: string[] | undefined; } @@ -466,14 +529,13 @@ export namespace DeleteMembersRequest { export interface DeleteMembersResponse { /** - *

              The list of AWS account identifiers for the member accounts that Detective successfully - * deleted from the behavior graph.

              + *

              The list of Amazon Web Services account identifiers for the member accounts that Detective successfully removed from the behavior graph.

              */ AccountIds?: string[]; /** - *

              The list of member accounts that Detective was not able to delete from the behavior graph. - * For each member account, provides the reason that the deletion could not be + *

The list of member accounts that Detective was not able to remove from the + * behavior graph. For each member account, the list provides the reason that the removal could not be * processed.

              */ UnprocessedAccounts?: UnprocessedAccount[]; @@ -488,6 +550,58 @@ export namespace DeleteMembersResponse { }); } +export interface DescribeOrganizationConfigurationRequest { + /** + *

              The ARN of the organization behavior graph.

              + */ + GraphArn: string | undefined; +} + +export namespace DescribeOrganizationConfigurationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeOrganizationConfigurationRequest): any => ({ + ...obj, + }); +} + +export interface DescribeOrganizationConfigurationResponse { + /** + *

              Indicates whether to automatically enable new organization accounts as member accounts + * in the organization behavior graph.

              + */ + AutoEnable?: boolean; +} + +export namespace DescribeOrganizationConfigurationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeOrganizationConfigurationResponse): any => ({ + ...obj, + }); +} + +/** + *

              The request cannot be completed because too many other requests are occurring at the + * same time.
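A sketch of handling the new TooManyRequestsException by its name field (illustrative only; real code would normally rely on the SDK's configured retry strategy rather than a hand-rolled delay):

```javascript
import { DetectiveClient, ListGraphsCommand } from "@aws-sdk/client-detective"; // ES Modules import

const client = new DetectiveClient({ region: "us-east-1" });
try {
  await client.send(new ListGraphsCommand({}));
} catch (error) {
  if (error?.name === "TooManyRequestsException") {
    // Too many concurrent requests; back off briefly and retry once.
    await new Promise((resolve) => setTimeout(resolve, 1000));
    await client.send(new ListGraphsCommand({}));
  } else {
    throw error;
  }
}
```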

              + */ +export interface TooManyRequestsException extends __SmithyException, $MetadataBearer { + name: "TooManyRequestsException"; + $fault: "client"; + Message?: string; +} + +export namespace TooManyRequestsException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TooManyRequestsException): any => ({ + ...obj, + }); +} + export interface DisassociateMembershipRequest { /** *

              The ARN of the behavior graph to remove the member account from.

              @@ -506,6 +620,22 @@ export namespace DisassociateMembershipRequest { }); } +export interface EnableOrganizationAdminAccountRequest { + /** + *

              The Amazon Web Services account identifier of the account to designate as the Detective administrator account for the organization.

              + */ + AccountId: string | undefined; +} + +export namespace EnableOrganizationAdminAccountRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableOrganizationAdminAccountRequest): any => ({ + ...obj, + }); +} + export interface GetMembersRequest { /** *

              The ARN of the behavior graph for which to request the member details.

              @@ -513,8 +643,9 @@ export interface GetMembersRequest { GraphArn: string | undefined; /** - *

              The list of AWS account identifiers for the member account for which to return member - * details. You can request details for up to 50 member accounts at a time.

              + *

              The list of Amazon Web Services account identifiers for the member account for which to + * return member details. You can request details for up to 50 member accounts at a + * time.

              *

              You cannot use GetMembers to retrieve information about member accounts * that were removed from the behavior graph.

              */ @@ -532,7 +663,8 @@ export namespace GetMembersRequest { export interface GetMembersResponse { /** - *

              The member account details that Detective is returning in response to the request.

              + *

              The member account details that Detective is returning in response to the + * request.

              */ MemberDetails?: MemberDetail[]; @@ -587,8 +719,8 @@ export interface Graph { Arn?: string; /** - *

              The date and time that the behavior graph was created. The value is in milliseconds - * since the epoch.

              + *

              The date and time that the behavior graph was created. The value is an ISO8601 formatted + * string. For example, 2021-08-18T16:35:56.284Z.

              */ CreatedTime?: Date; } @@ -704,15 +836,18 @@ export namespace ListMembersRequest { export interface ListMembersResponse { /** *

              The list of member accounts in the behavior graph.

              - *

              The results include member accounts that did not pass verification and member accounts - * that have not yet accepted the invitation to the behavior graph. The results do not include - * member accounts that were removed from the behavior graph.

              + *

              For invited accounts, the results include member accounts that did not pass verification + * and member accounts that have not yet accepted the invitation to the behavior graph. The + * results do not include member accounts that were removed from the behavior graph.

              + *

              For the organization behavior graph, the results do not include organization accounts + * that the Detective administrator account has not enabled as member + * accounts.

              */ MemberDetails?: MemberDetail[]; /** - *

              If there are more member accounts remaining in the results, then this is the pagination - * token to use to request the next page of member accounts.

              + *

              If there are more member accounts remaining in the results, then use this pagination + * token to request the next page of member accounts.

              */ NextToken?: string; } @@ -726,6 +861,51 @@ export namespace ListMembersResponse { }); } +export interface ListOrganizationAdminAccountsRequest { + /** + *

              For requests to get the next page of results, the pagination token that was returned + * with the previous set of results. The initial request does not include a pagination + * token.

              + */ + NextToken?: string; + + /** + *

              The maximum number of results to return.

              + */ + MaxResults?: number; +} + +export namespace ListOrganizationAdminAccountsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListOrganizationAdminAccountsRequest): any => ({ + ...obj, + }); +} + +export interface ListOrganizationAdminAccountsResponse { + /** + *

              The list of delegated administrator accounts.

              + */ + Administrators?: Administrator[]; + + /** + *

              If there are more accounts remaining in the results, then this is the pagination token + * to use to request the next page of accounts.
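This diff also adds paginateListOrganizationAdminAccounts (see the new ListOrganizationAdminAccountsPaginator.ts below), so callers do not have to thread NextToken by hand. A minimal sketch, assuming the paginator is re-exported from the package root:

```ts
import {
  DetectiveClient,
  paginateListOrganizationAdminAccounts,
} from "@aws-sdk/client-detective";

const client = new DetectiveClient({ region: "us-east-1" });
// pageSize is forwarded to MaxResults by the paginator.
for await (const page of paginateListOrganizationAdminAccounts({ client, pageSize: 25 }, {})) {
  for (const admin of page.Administrators ?? []) {
    console.log(admin.AccountId, admin.GraphArn, admin.DelegationTime);
  }
}
```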

              + */ + NextToken?: string; +} + +export namespace ListOrganizationAdminAccountsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListOrganizationAdminAccountsResponse): any => ({ + ...obj, + }); +} + export interface ListTagsForResourceRequest { /** *

              The ARN of the behavior graph for which to retrieve the tag values.

              @@ -866,3 +1046,25 @@ export namespace UntagResourceResponse { ...obj, }); } + +export interface UpdateOrganizationConfigurationRequest { + /** + *

              The ARN of the organization behavior graph.

              + */ + GraphArn: string | undefined; + + /** + *

              Indicates whether to automatically enable new organization accounts as member accounts + * in the organization behavior graph.
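And the corresponding write path: a sketch of turning auto-enable on for the organization behavior graph, again with a placeholder ARN:

```ts
import {
  DetectiveClient,
  UpdateOrganizationConfigurationCommand,
} from "@aws-sdk/client-detective";

const client = new DetectiveClient({ region: "us-east-1" });
await client.send(
  new UpdateOrganizationConfigurationCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:123412341234", // placeholder
    AutoEnable: true, // enroll new organization accounts as members automatically
  })
);
```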

              + */ + AutoEnable?: boolean; +} + +export namespace UpdateOrganizationConfigurationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateOrganizationConfigurationRequest): any => ({ + ...obj, + }); +} diff --git a/clients/client-detective/src/pagination/ListOrganizationAdminAccountsPaginator.ts b/clients/client-detective/src/pagination/ListOrganizationAdminAccountsPaginator.ts new file mode 100644 index 000000000000..c7b50154c5d7 --- /dev/null +++ b/clients/client-detective/src/pagination/ListOrganizationAdminAccountsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListOrganizationAdminAccountsCommand, + ListOrganizationAdminAccountsCommandInput, + ListOrganizationAdminAccountsCommandOutput, +} from "../commands/ListOrganizationAdminAccountsCommand"; +import { Detective } from "../Detective"; +import { DetectiveClient } from "../DetectiveClient"; +import { DetectivePaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: DetectiveClient, + input: ListOrganizationAdminAccountsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListOrganizationAdminAccountsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Detective, + input: ListOrganizationAdminAccountsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listOrganizationAdminAccounts(input, ...args); +}; +export async function* paginateListOrganizationAdminAccounts( + config: DetectivePaginationConfiguration, + input: ListOrganizationAdminAccountsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListOrganizationAdminAccountsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof Detective) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof DetectiveClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Detective | DetectiveClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-detective/src/pagination/index.ts b/clients/client-detective/src/pagination/index.ts index 3c8ead26f1d2..e350a39932e8 100644 --- a/clients/client-detective/src/pagination/index.ts +++ b/clients/client-detective/src/pagination/index.ts @@ -2,3 +2,4 @@ export * from "./Interfaces"; export * from "./ListGraphsPaginator"; export * from "./ListInvitationsPaginator"; export * from "./ListMembersPaginator"; +export * from "./ListOrganizationAdminAccountsPaginator"; diff --git a/clients/client-detective/src/protocols/Aws_restJson1.ts b/clients/client-detective/src/protocols/Aws_restJson1.ts index a764b97daadf..43c0c914a109 100644 --- a/clients/client-detective/src/protocols/Aws_restJson1.ts +++ b/clients/client-detective/src/protocols/Aws_restJson1.ts @@ -1,5 +1,6 @@ import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; import { + expectBoolean as __expectBoolean, expectLong as __expectLong, expectNonNull as __expectNonNull, expectObject as 
__expectObject, @@ -21,14 +22,30 @@ import { CreateGraphCommandInput, CreateGraphCommandOutput } from "../commands/C import { CreateMembersCommandInput, CreateMembersCommandOutput } from "../commands/CreateMembersCommand"; import { DeleteGraphCommandInput, DeleteGraphCommandOutput } from "../commands/DeleteGraphCommand"; import { DeleteMembersCommandInput, DeleteMembersCommandOutput } from "../commands/DeleteMembersCommand"; +import { + DescribeOrganizationConfigurationCommandInput, + DescribeOrganizationConfigurationCommandOutput, +} from "../commands/DescribeOrganizationConfigurationCommand"; +import { + DisableOrganizationAdminAccountCommandInput, + DisableOrganizationAdminAccountCommandOutput, +} from "../commands/DisableOrganizationAdminAccountCommand"; import { DisassociateMembershipCommandInput, DisassociateMembershipCommandOutput, } from "../commands/DisassociateMembershipCommand"; +import { + EnableOrganizationAdminAccountCommandInput, + EnableOrganizationAdminAccountCommandOutput, +} from "../commands/EnableOrganizationAdminAccountCommand"; import { GetMembersCommandInput, GetMembersCommandOutput } from "../commands/GetMembersCommand"; import { ListGraphsCommandInput, ListGraphsCommandOutput } from "../commands/ListGraphsCommand"; import { ListInvitationsCommandInput, ListInvitationsCommandOutput } from "../commands/ListInvitationsCommand"; import { ListMembersCommandInput, ListMembersCommandOutput } from "../commands/ListMembersCommand"; +import { + ListOrganizationAdminAccountsCommandInput, + ListOrganizationAdminAccountsCommandOutput, +} from "../commands/ListOrganizationAdminAccountsCommand"; import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, @@ -40,14 +57,20 @@ import { } from "../commands/StartMonitoringMemberCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { + UpdateOrganizationConfigurationCommandInput, + UpdateOrganizationConfigurationCommandOutput, +} from "../commands/UpdateOrganizationConfigurationCommand"; import { Account, + Administrator, ConflictException, Graph, InternalServerException, MemberDetail, ResourceNotFoundException, ServiceQuotaExceededException, + TooManyRequestsException, UnprocessedAccount, ValidationException, } from "../models/models_0"; @@ -179,6 +202,54 @@ export const serializeAws_restJson1DeleteMembersCommand = async ( }); }; +export const serializeAws_restJson1DescribeOrganizationConfigurationCommand = async ( + input: DescribeOrganizationConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/orgs/describeOrganizationConfiguration"; + let body: any; + body = JSON.stringify({ + ...(input.GraphArn !== undefined && input.GraphArn !== null && { GraphArn: input.GraphArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DisableOrganizationAdminAccountCommand = async ( + input: DisableOrganizationAdminAccountCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/orgs/disableAdminAccount"; + let body: any; + body = ""; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1DisassociateMembershipCommand = async ( input: DisassociateMembershipCommandInput, context: __SerdeContext @@ -203,6 +274,31 @@ export const serializeAws_restJson1DisassociateMembershipCommand = async ( }); }; +export const serializeAws_restJson1EnableOrganizationAdminAccountCommand = async ( + input: EnableOrganizationAdminAccountCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/orgs/enableAdminAccount"; + let body: any; + body = JSON.stringify({ + ...(input.AccountId !== undefined && input.AccountId !== null && { AccountId: input.AccountId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetMembersCommand = async ( input: GetMembersCommandInput, context: __SerdeContext @@ -305,6 +401,32 @@ export const serializeAws_restJson1ListMembersCommand = async ( }); }; +export const serializeAws_restJson1ListOrganizationAdminAccountsCommand = async ( + input: ListOrganizationAdminAccountsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/orgs/adminAccountslist"; + let body: any; + body = JSON.stringify({ + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1ListTagsForResourceCommand = async ( input: ListTagsForResourceCommandInput, context: __SerdeContext @@ -448,6 +570,32 @@ export const serializeAws_restJson1UntagResourceCommand = async ( }); }; +export const serializeAws_restJson1UpdateOrganizationConfigurationCommand = async ( + input: UpdateOrganizationConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/orgs/updateOrganizationConfiguration"; + let body: any; + body = JSON.stringify({ + ...(input.AutoEnable !== undefined && input.AutoEnable !== null && { AutoEnable: input.AutoEnable }), + ...(input.GraphArn !== undefined && input.GraphArn !== null && { GraphArn: input.GraphArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const deserializeAws_restJson1AcceptInvitationCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -827,6 +975,144 @@ const deserializeAws_restJson1DeleteMembersCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1DescribeOrganizationConfigurationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DescribeOrganizationConfigurationCommandError(output, context); + } + const contents: DescribeOrganizationConfigurationCommandOutput = { + $metadata: deserializeMetadata(output), + AutoEnable: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.AutoEnable !== undefined && data.AutoEnable !== null) { + contents.AutoEnable = __expectBoolean(data.AutoEnable); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DescribeOrganizationConfigurationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.detective#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyRequestsException": + case "com.amazonaws.detective#TooManyRequestsException": + response = { + ...(await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.detective#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DisableOrganizationAdminAccountCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisableOrganizationAdminAccountCommandError(output, context); + } + const contents: DisableOrganizationAdminAccountCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisableOrganizationAdminAccountCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.detective#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyRequestsException": + case "com.amazonaws.detective#TooManyRequestsException": + response = { + ...(await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.detective#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1DisassociateMembershipCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -902,6 +1188,73 @@ const deserializeAws_restJson1DisassociateMembershipCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1EnableOrganizationAdminAccountCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): 
Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1EnableOrganizationAdminAccountCommandError(output, context); + } + const contents: EnableOrganizationAdminAccountCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1EnableOrganizationAdminAccountCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.detective#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyRequestsException": + case "com.amazonaws.detective#TooManyRequestsException": + response = { + ...(await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.detective#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1GetMembersCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1186,6 +1539,81 @@ const deserializeAws_restJson1ListMembersCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1ListOrganizationAdminAccountsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListOrganizationAdminAccountsCommandError(output, context); + } + const contents: ListOrganizationAdminAccountsCommandOutput = { + $metadata: deserializeMetadata(output), + Administrators: undefined, + NextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Administrators !== undefined && data.Administrators !== null) { + contents.Administrators = deserializeAws_restJson1AdministratorList(data.Administrators, context); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListOrganizationAdminAccountsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + 
...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.detective#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyRequestsException": + case "com.amazonaws.detective#TooManyRequestsException": + response = { + ...(await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.detective#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1ListTagsForResourceCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1549,6 +1977,73 @@ const deserializeAws_restJson1UntagResourceCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1UpdateOrganizationConfigurationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateOrganizationConfigurationCommandError(output, context); + } + const contents: UpdateOrganizationConfigurationCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateOrganizationConfigurationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.detective#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyRequestsException": + case "com.amazonaws.detective#TooManyRequestsException": + response = { + ...(await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.detective#ValidationException": + response = { + ...(await 
deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + const deserializeAws_restJson1ConflictExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -1617,6 +2112,23 @@ const deserializeAws_restJson1ServiceQuotaExceededExceptionResponse = async ( return contents; }; +const deserializeAws_restJson1TooManyRequestsExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: TooManyRequestsException = { + name: "TooManyRequestsException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + const deserializeAws_restJson1ValidationExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -1686,6 +2198,28 @@ const deserializeAws_restJson1AccountIdList = (output: any, context: __SerdeCont }); }; +const deserializeAws_restJson1Administrator = (output: any, context: __SerdeContext): Administrator => { + return { + AccountId: __expectString(output.AccountId), + DelegationTime: + output.DelegationTime !== undefined && output.DelegationTime !== null + ? __expectNonNull(__parseRfc3339DateTime(output.DelegationTime)) + : undefined, + GraphArn: __expectString(output.GraphArn), + } as any; +}; + +const deserializeAws_restJson1AdministratorList = (output: any, context: __SerdeContext): Administrator[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Administrator(entry, context); + }); +}; + const deserializeAws_restJson1Graph = (output: any, context: __SerdeContext): Graph => { return { Arn: __expectString(output.Arn), @@ -1714,6 +2248,7 @@ const deserializeAws_restJson1MemberDetail = (output: any, context: __SerdeConte DisabledReason: __expectString(output.DisabledReason), EmailAddress: __expectString(output.EmailAddress), GraphArn: __expectString(output.GraphArn), + InvitationType: __expectString(output.InvitationType), InvitedTime: output.InvitedTime !== undefined && output.InvitedTime !== null ? __expectNonNull(__parseRfc3339DateTime(output.InvitedTime)) diff --git a/clients/client-ec2/README.md b/clients/client-ec2/README.md index baa1274726e1..b78825ac5b6d 100644 --- a/clients/client-ec2/README.md +++ b/clients/client-ec2/README.md @@ -9,10 +9,10 @@ AWS SDK for JavaScript EC2 Client for Node.js, Browser and React Native. Amazon Elastic Compute Cloud -

              Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS Cloud. +

              Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the Amazon Web Services Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the -AWS Cloud where you can launch AWS resources in a virtual network that you've defined. Amazon Elastic Block Store +Amazon Web Services Cloud where you can launch Amazon Web Services resources in a virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available and reliable storage volumes that can be attached to any running instance and used like a hard drive.

              To learn more, see the following resources:

              @@ -30,7 +30,7 @@ and reliable storage volumes that can be attached to any running instance and us

            • -

              AWS VPN: AWS VPN product page, AWS VPN documentation +

              Amazon Web Services VPN: Amazon Web Services VPN product page, Amazon Web Services VPN documentation

            diff --git a/clients/client-ec2/src/EC2.ts b/clients/client-ec2/src/EC2.ts index 79afc2648edd..c1bdd79bc955 100644 --- a/clients/client-ec2/src/EC2.ts +++ b/clients/client-ec2/src/EC2.ts @@ -2156,6 +2156,11 @@ import { ModifyVpcEndpointServiceConfigurationCommandInput, ModifyVpcEndpointServiceConfigurationCommandOutput, } from "./commands/ModifyVpcEndpointServiceConfigurationCommand"; +import { + ModifyVpcEndpointServicePayerResponsibilityCommand, + ModifyVpcEndpointServicePayerResponsibilityCommandInput, + ModifyVpcEndpointServicePayerResponsibilityCommandOutput, +} from "./commands/ModifyVpcEndpointServicePayerResponsibilityCommand"; import { ModifyVpcEndpointServicePermissionsCommand, ModifyVpcEndpointServicePermissionsCommandInput, @@ -2515,10 +2520,10 @@ import { EC2Client } from "./EC2Client"; /** * Amazon Elastic Compute Cloud - *

            Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS Cloud. + *

            Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the Amazon Web Services Cloud. * Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications * faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the - * AWS Cloud where you can launch AWS resources in a virtual network that you've defined. Amazon Elastic Block Store + * Amazon Web Services Cloud where you can launch Amazon Web Services resources in a virtual network that you've defined. Amazon Elastic Block Store * (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available * and reliable storage volumes that can be attached to any running instance and used like a hard drive.

            *

            To learn more, see the following resources:

            @@ -2536,7 +2541,7 @@ import { EC2Client } from "./EC2Client"; *

            *
          • *
          • - *

            AWS VPN: AWS VPN product page, AWS VPN documentation + *

            Amazon Web Services VPN: Amazon Web Services VPN product page, Amazon Web Services VPN documentation *

            *
          • *
          @@ -9376,7 +9381,7 @@ export class EC2 extends EC2Client { } /** - *

          Describes attributes of your AWS account. The following are the supported account attributes:

          + *

          Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

          *
            *
          • *

            @@ -12914,7 +12919,10 @@ export class EC2 extends EC2Client { } /** - *

            [VPC only] Describes the stale security group rules for security groups in a specified VPC. Rules are stale when they reference a deleted security group in a peer VPC, or a security group in a peer VPC for which the VPC peering connection has been deleted.

            + *

            [VPC only] Describes the stale security group rules for security groups in a specified VPC. + * Rules are stale when they reference a deleted security group in the same VPC or in a peer VPC, + * or if they reference a security group in a peer VPC for which the VPC peering connection has + * been deleted.
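A hedged sketch of calling the operation; VpcId as the required input and StaleSecurityGroupSet as the result member are assumptions drawn from the wider EC2 model, not from this hunk, and the VPC ID is a placeholder:

```ts
import { EC2Client, DescribeStaleSecurityGroupsCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
const out = await client.send(
  new DescribeStaleSecurityGroupsCommand({ VpcId: "vpc-0abc1234def567890" }) // placeholder VPC ID
);
// StaleSecurityGroupSet is assumed to be the result member carrying the stale rules.
console.log(out.StaleSecurityGroupSet);
```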

            */ public describeStaleSecurityGroups( args: DescribeStaleSecurityGroupsCommandInput, @@ -18606,6 +18614,40 @@ export class EC2 extends EC2Client { } } + /** + *

            Modifies the payer responsibility for your VPC endpoint service.
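A sketch under stated assumptions: the request's ServiceId member is not visible in this hunk and is assumed, the service ID is a placeholder, and ServiceOwner is the only value in the PayerResponsibility enum added by this change:

```ts
import {
  EC2Client,
  ModifyVpcEndpointServicePayerResponsibilityCommand,
} from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
await client.send(
  new ModifyVpcEndpointServicePayerResponsibilityCommand({
    ServiceId: "vpce-svc-0123456789abcdef0", // assumed request member; placeholder ID
    PayerResponsibility: "ServiceOwner", // only value in the enum added here
  })
);
```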

            + */ + public modifyVpcEndpointServicePayerResponsibility( + args: ModifyVpcEndpointServicePayerResponsibilityCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public modifyVpcEndpointServicePayerResponsibility( + args: ModifyVpcEndpointServicePayerResponsibilityCommandInput, + cb: (err: any, data?: ModifyVpcEndpointServicePayerResponsibilityCommandOutput) => void + ): void; + public modifyVpcEndpointServicePayerResponsibility( + args: ModifyVpcEndpointServicePayerResponsibilityCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ModifyVpcEndpointServicePayerResponsibilityCommandOutput) => void + ): void; + public modifyVpcEndpointServicePayerResponsibility( + args: ModifyVpcEndpointServicePayerResponsibilityCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: ModifyVpcEndpointServicePayerResponsibilityCommandOutput) => void), + cb?: (err: any, data?: ModifyVpcEndpointServicePayerResponsibilityCommandOutput) => void + ): Promise | void { + const command = new ModifyVpcEndpointServicePayerResponsibilityCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

            Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, * IAM roles, and Amazon Web Services accounts) to connect to your endpoint service.

            diff --git a/clients/client-ec2/src/EC2Client.ts b/clients/client-ec2/src/EC2Client.ts index bcb935c2afa7..db64a08244ed 100644 --- a/clients/client-ec2/src/EC2Client.ts +++ b/clients/client-ec2/src/EC2Client.ts @@ -1528,6 +1528,10 @@ import { ModifyVpcEndpointServiceConfigurationCommandInput, ModifyVpcEndpointServiceConfigurationCommandOutput, } from "./commands/ModifyVpcEndpointServiceConfigurationCommand"; +import { + ModifyVpcEndpointServicePayerResponsibilityCommandInput, + ModifyVpcEndpointServicePayerResponsibilityCommandOutput, +} from "./commands/ModifyVpcEndpointServicePayerResponsibilityCommand"; import { ModifyVpcEndpointServicePermissionsCommandInput, ModifyVpcEndpointServicePermissionsCommandOutput, @@ -2211,6 +2215,7 @@ export type ServiceInputTypes = | ModifyVpcEndpointCommandInput | ModifyVpcEndpointConnectionNotificationCommandInput | ModifyVpcEndpointServiceConfigurationCommandInput + | ModifyVpcEndpointServicePayerResponsibilityCommandInput | ModifyVpcEndpointServicePermissionsCommandInput | ModifyVpcPeeringConnectionOptionsCommandInput | ModifyVpcTenancyCommandInput @@ -2728,6 +2733,7 @@ export type ServiceOutputTypes = | ModifyVpcEndpointCommandOutput | ModifyVpcEndpointConnectionNotificationCommandOutput | ModifyVpcEndpointServiceConfigurationCommandOutput + | ModifyVpcEndpointServicePayerResponsibilityCommandOutput | ModifyVpcEndpointServicePermissionsCommandOutput | ModifyVpcPeeringConnectionOptionsCommandOutput | ModifyVpcTenancyCommandOutput @@ -2950,10 +2956,10 @@ export interface EC2ClientResolvedConfig extends EC2ClientResolvedConfigType {} /** * Amazon Elastic Compute Cloud - *

            Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS Cloud. + *

            Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the Amazon Web Services Cloud. * Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications * faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the - * AWS Cloud where you can launch AWS resources in a virtual network that you've defined. Amazon Elastic Block Store + * Amazon Web Services Cloud where you can launch Amazon Web Services resources in a virtual network that you've defined. Amazon Elastic Block Store * (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available * and reliable storage volumes that can be attached to any running instance and used like a hard drive.

            *

            To learn more, see the following resources:

            @@ -2971,7 +2977,7 @@ export interface EC2ClientResolvedConfig extends EC2ClientResolvedConfigType {} *

            *
          • *
          • - *

            AWS VPN: AWS VPN product page, AWS VPN documentation + *

            Amazon Web Services VPN: Amazon Web Services VPN product page, Amazon Web Services VPN documentation *

            *
          • *
          diff --git a/clients/client-ec2/src/commands/DescribeAccountAttributesCommand.ts b/clients/client-ec2/src/commands/DescribeAccountAttributesCommand.ts index 82ac43ad9ba7..a7670070ca46 100644 --- a/clients/client-ec2/src/commands/DescribeAccountAttributesCommand.ts +++ b/clients/client-ec2/src/commands/DescribeAccountAttributesCommand.ts @@ -22,7 +22,7 @@ export interface DescribeAccountAttributesCommandInput extends DescribeAccountAt export interface DescribeAccountAttributesCommandOutput extends DescribeAccountAttributesResult, __MetadataBearer {} /** - *

          Describes attributes of your AWS account. The following are the supported account attributes:

          + *

          Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

          *
            *
          • *

            diff --git a/clients/client-ec2/src/commands/DescribeStaleSecurityGroupsCommand.ts b/clients/client-ec2/src/commands/DescribeStaleSecurityGroupsCommand.ts index 6a34368869f8..1ca05bd9185d 100644 --- a/clients/client-ec2/src/commands/DescribeStaleSecurityGroupsCommand.ts +++ b/clients/client-ec2/src/commands/DescribeStaleSecurityGroupsCommand.ts @@ -22,7 +22,10 @@ export interface DescribeStaleSecurityGroupsCommandInput extends DescribeStaleSe export interface DescribeStaleSecurityGroupsCommandOutput extends DescribeStaleSecurityGroupsResult, __MetadataBearer {} /** - *

            [VPC only] Describes the stale security group rules for security groups in a specified VPC. Rules are stale when they reference a deleted security group in a peer VPC, or a security group in a peer VPC for which the VPC peering connection has been deleted.

            + *

            [VPC only] Describes the stale security group rules for security groups in a specified VPC. + * Rules are stale when they reference a deleted security group in the same VPC or in a peer VPC, + * or if they reference a security group in a peer VPC for which the VPC peering connection has + * been deleted.

            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/GetIpamResourceCidrsCommand.ts b/clients/client-ec2/src/commands/GetIpamResourceCidrsCommand.ts index 97b194efd08c..dff0c8298256 100644 --- a/clients/client-ec2/src/commands/GetIpamResourceCidrsCommand.ts +++ b/clients/client-ec2/src/commands/GetIpamResourceCidrsCommand.ts @@ -12,7 +12,8 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { GetIpamResourceCidrsRequest, GetIpamResourceCidrsResult } from "../models/models_4"; +import { GetIpamResourceCidrsRequest } from "../models/models_4"; +import { GetIpamResourceCidrsResult } from "../models/models_5"; import { deserializeAws_ec2GetIpamResourceCidrsCommand, serializeAws_ec2GetIpamResourceCidrsCommand, diff --git a/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts b/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts index fd1514926a76..4a540cb4467a 100644 --- a/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts +++ b/clients/client-ec2/src/commands/GetLaunchTemplateDataCommand.ts @@ -12,8 +12,7 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { GetLaunchTemplateDataRequest } from "../models/models_4"; -import { GetLaunchTemplateDataResult } from "../models/models_5"; +import { GetLaunchTemplateDataRequest, GetLaunchTemplateDataResult } from "../models/models_5"; import { deserializeAws_ec2GetLaunchTemplateDataCommand, serializeAws_ec2GetLaunchTemplateDataCommand, diff --git a/clients/client-ec2/src/commands/ModifyVpcEndpointServicePayerResponsibilityCommand.ts b/clients/client-ec2/src/commands/ModifyVpcEndpointServicePayerResponsibilityCommand.ts new file mode 100644 index 000000000000..6359f63662ae --- /dev/null +++ b/clients/client-ec2/src/commands/ModifyVpcEndpointServicePayerResponsibilityCommand.ts @@ -0,0 +1,110 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; +import { + ModifyVpcEndpointServicePayerResponsibilityRequest, + ModifyVpcEndpointServicePayerResponsibilityResult, +} from "../models/models_5"; +import { + deserializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityCommand, + serializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityCommand, +} from "../protocols/Aws_ec2"; + +export interface ModifyVpcEndpointServicePayerResponsibilityCommandInput + extends ModifyVpcEndpointServicePayerResponsibilityRequest {} +export interface ModifyVpcEndpointServicePayerResponsibilityCommandOutput + extends ModifyVpcEndpointServicePayerResponsibilityResult, + __MetadataBearer {} + +/** + *

            Modifies the payer responsibility for your VPC endpoint service.

            + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EC2Client, ModifyVpcEndpointServicePayerResponsibilityCommand } from "@aws-sdk/client-ec2"; // ES Modules import + * // const { EC2Client, ModifyVpcEndpointServicePayerResponsibilityCommand } = require("@aws-sdk/client-ec2"); // CommonJS import + * const client = new EC2Client(config); + * const command = new ModifyVpcEndpointServicePayerResponsibilityCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ModifyVpcEndpointServicePayerResponsibilityCommandInput} for command's `input` shape. + * @see {@link ModifyVpcEndpointServicePayerResponsibilityCommandOutput} for command's `response` shape. + * @see {@link EC2ClientResolvedConfig | config} for EC2Client's `config` shape. + * + */ +export class ModifyVpcEndpointServicePayerResponsibilityCommand extends $Command< + ModifyVpcEndpointServicePayerResponsibilityCommandInput, + ModifyVpcEndpointServicePayerResponsibilityCommandOutput, + EC2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ModifyVpcEndpointServicePayerResponsibilityCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EC2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler< + ModifyVpcEndpointServicePayerResponsibilityCommandInput, + ModifyVpcEndpointServicePayerResponsibilityCommandOutput + > { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EC2Client"; + const commandName = "ModifyVpcEndpointServicePayerResponsibilityCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ModifyVpcEndpointServicePayerResponsibilityRequest.filterSensitiveLog, + outputFilterSensitiveLog: ModifyVpcEndpointServicePayerResponsibilityResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: ModifyVpcEndpointServicePayerResponsibilityCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ec2/src/commands/index.ts b/clients/client-ec2/src/commands/index.ts index 04091f4e8105..f909da648f42 100644 --- a/clients/client-ec2/src/commands/index.ts +++ b/clients/client-ec2/src/commands/index.ts @@ -442,6 +442,7 @@ export * from "./ModifyVpcAttributeCommand"; export * from "./ModifyVpcEndpointCommand"; export * from "./ModifyVpcEndpointConnectionNotificationCommand"; export * from "./ModifyVpcEndpointServiceConfigurationCommand"; +export * from "./ModifyVpcEndpointServicePayerResponsibilityCommand"; export * from 
"./ModifyVpcEndpointServicePermissionsCommand"; export * from "./ModifyVpcPeeringConnectionOptionsCommand"; export * from "./ModifyVpcTenancyCommand"; diff --git a/clients/client-ec2/src/models/models_0.ts b/clients/client-ec2/src/models/models_0.ts index 96c3f870fcb8..2b5b8240fb91 100644 --- a/clients/client-ec2/src/models/models_0.ts +++ b/clients/client-ec2/src/models/models_0.ts @@ -6332,6 +6332,15 @@ export interface CreateCapacityReservationRequest { *

            The Amazon Resource Name (ARN) of the Outpost on which to create the Capacity Reservation.

            */ OutpostArn?: string; + + /** + *

            The Amazon Resource Name (ARN) of the cluster placement group in which + * to create the Capacity Reservation. For more information, see + * + * Capacity Reservations for cluster placement groups in the + * Amazon EC2 User Guide.
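A hedged sketch of creating a Capacity Reservation inside a cluster placement group; every member other than PlacementGroupArn (instance type, platform, count, Availability Zone) is an assumed illustrative value, and the ARN is a placeholder:

```ts
import { EC2Client, CreateCapacityReservationCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
await client.send(
  new CreateCapacityReservationCommand({
    // Illustrative reservation parameters (assumed, not part of this change).
    InstanceType: "c5.large",
    InstancePlatform: "Linux/UNIX",
    InstanceCount: 2,
    AvailabilityZone: "us-east-1a",
    // New member: target an existing cluster placement group (placeholder ARN).
    PlacementGroupArn: "arn:aws:ec2:us-east-1:111122223333:placement-group/my-cluster-pg",
  })
);
```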

            + */ + PlacementGroupArn?: string; } export namespace CreateCapacityReservationRequest { @@ -6524,6 +6533,15 @@ export interface CapacityReservation { * Only valid for Capacity Reservations that were created by a Capacity Reservation Fleet.

            */ CapacityReservationFleetId?: string; + + /** + *

            The Amazon Resource Name (ARN) of the cluster placement group in which + * the Capacity Reservation was created. For more information, see + * + * Capacity Reservations for cluster placement groups in the + * Amazon EC2 User Guide.

            + */ + PlacementGroupArn?: string; } export namespace CapacityReservation { diff --git a/clients/client-ec2/src/models/models_1.ts b/clients/client-ec2/src/models/models_1.ts index 7ec8a82865e9..5ab5fed09d47 100644 --- a/clients/client-ec2/src/models/models_1.ts +++ b/clients/client-ec2/src/models/models_1.ts @@ -767,7 +767,7 @@ export enum FleetReplacementStrategy { /** *

            The Spot Instance replacement strategy to use when Amazon EC2 emits a rebalance * notification signal that your Spot Instance is at an elevated risk of being interrupted. - * For more information, see Capacity rebalancing in the Amazon EC2 User Guide.

            + * For more information, see Capacity rebalancing in the Amazon EC2 User Guide.

            */ export interface FleetSpotCapacityRebalanceRequest { /** @@ -790,7 +790,8 @@ export interface FleetSpotCapacityRebalanceRequest { /** *

            The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot * Instance after launching a new replacement Spot Instance.

            - *

            Valid only when ReplacementStrategy is set to launch-before-terminate.

            + *

            Required when ReplacementStrategy is set to launch-before-terminate.

            + *

            Not valid when ReplacementStrategy is set to launch.

            *

            Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds.
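A minimal typed sketch of the rebalance settings described above; where this object plugs into the larger CreateFleet request (under the Spot maintenance-strategy settings) is assumed rather than shown in this hunk:

```ts
import type { FleetSpotCapacityRebalanceRequest } from "@aws-sdk/client-ec2";

// launch-before-terminate requires TerminationDelay (120-7200 seconds);
// omit TerminationDelay when ReplacementStrategy is "launch".
const capacityRebalance: FleetSpotCapacityRebalanceRequest = {
  ReplacementStrategy: "launch-before-terminate",
  TerminationDelay: 120,
};
```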

            */ TerminationDelay?: number; @@ -1060,7 +1061,7 @@ export interface CreateFleetRequest { * launched.

            *
          • *
          - *

          For more information, see EC2 Fleet + *

          For more information, see EC2 Fleet * request types in the Amazon EC2 User Guide.

          */ Type?: FleetType | string; @@ -3949,6 +3950,11 @@ export enum LaunchTemplateHttpTokensState { required = "required", } +export enum LaunchTemplateInstanceMetadataTagsState { + disabled = "disabled", + enabled = "enabled", +} + /** *

          The metadata options for the instance. For more information, see Instance Metadata and User Data in the * Amazon Elastic Compute Cloud User Guide.

          @@ -3969,7 +3975,8 @@ export interface LaunchTemplateInstanceMetadataOptionsRequest { HttpPutResponseHopLimit?: number; /** - *

          This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

          + *

          Enables or disables the HTTP metadata endpoint on your instances. If the parameter is not + * specified, the default state is enabled.

          * *

          If you specify a value of disabled, you will not be able to access your instance metadata. *

          @@ -3983,6 +3990,16 @@ export interface LaunchTemplateInstanceMetadataOptionsRequest { *

          */ HttpProtocolIpv6?: LaunchTemplateInstanceMetadataProtocolIpv6 | string; + + /** + *

          Set to enabled to allow access to instance tags from the instance + * metadata. Set to disabled to turn off access to instance tags from the instance + * metadata. For more information, see Work with + * instance tags using the instance metadata.

          + *

          Default: disabled + *
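A hedged sketch of opting a launch template into instance-metadata tags; the placement under LaunchTemplateData.MetadataOptions is assumed from the wider EC2 model, and the template name is a placeholder:

```ts
import { EC2Client, CreateLaunchTemplateCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
await client.send(
  new CreateLaunchTemplateCommand({
    LaunchTemplateName: "metadata-tags-example", // placeholder name
    LaunchTemplateData: {
      MetadataOptions: {
        HttpTokens: "required",
        InstanceMetadataTags: "enabled", // default is "disabled"
      },
    },
  })
);
```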

          + */ + InstanceMetadataTags?: LaunchTemplateInstanceMetadataTagsState | string; } export namespace LaunchTemplateInstanceMetadataOptionsRequest { @@ -5194,7 +5211,8 @@ export interface LaunchTemplateInstanceMetadataOptions { HttpPutResponseHopLimit?: number; /** - *

          This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

          + *

          Enables or disables the HTTP metadata endpoint on your instances. If the parameter is + * not specified, the default state is enabled.

          * *

          If you specify a value of disabled, you will not be able to access your instance metadata. *

          @@ -5208,6 +5226,13 @@ export interface LaunchTemplateInstanceMetadataOptions { *

          */ HttpProtocolIpv6?: LaunchTemplateInstanceMetadataProtocolIpv6 | string; + + /** + *

          + * + *

          + */ + InstanceMetadataTags?: LaunchTemplateInstanceMetadataTagsState | string; } export namespace LaunchTemplateInstanceMetadataOptions { @@ -7029,8 +7054,7 @@ export interface CreateNetworkInterfaceRequest { *

          Indicates the type of network interface. To create an Elastic Fabric Adapter (EFA), specify * efa. For more information, see * Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide. To create a trunk network interface, specify - * efa. For more information, see - * Network interface trunking in the Amazon Elastic Compute Cloud User Guide.

          + * trunk.
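A sketch under stated assumptions (SubnetId as the required member, placeholder IDs) showing the corrected trunk value:

```ts
import { EC2Client, CreateNetworkInterfaceCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
await client.send(
  new CreateNetworkInterfaceCommand({
    SubnetId: "subnet-0123456789abcdef0", // assumed required member; placeholder ID
    InterfaceType: "trunk", // or "efa" for an Elastic Fabric Adapter
  })
);
```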

          */ InterfaceType?: NetworkInterfaceCreationType | string; @@ -7649,6 +7673,11 @@ export interface PlacementGroup { *

          Any tags applied to the placement group.

          */ Tags?: Tag[]; + + /** + *

          The Amazon Resource Name (ARN) of the placement group.

          + */ + GroupArn?: string; } export namespace PlacementGroup { @@ -10305,42 +10334,3 @@ export namespace CreateTransitGatewayConnectPeerRequest { } export type BgpStatus = "down" | "up"; - -/** - *

          The BGP configuration information.

          - */ -export interface TransitGatewayAttachmentBgpConfiguration { - /** - *

          The transit gateway Autonomous System Number (ASN).

          - */ - TransitGatewayAsn?: number; - - /** - *

          The peer Autonomous System Number (ASN).

          - */ - PeerAsn?: number; - - /** - *

          The interior BGP peer IP address for the transit gateway.

          - */ - TransitGatewayAddress?: string; - - /** - *

          The interior BGP peer IP address for the appliance.

          - */ - PeerAddress?: string; - - /** - *

          The BGP status.

          - */ - BgpStatus?: BgpStatus | string; -} - -export namespace TransitGatewayAttachmentBgpConfiguration { - /** - * @internal - */ - export const filterSensitiveLog = (obj: TransitGatewayAttachmentBgpConfiguration): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_2.ts b/clients/client-ec2/src/models/models_2.ts index ae5d6100575f..9fb0cfcb1a54 100644 --- a/clients/client-ec2/src/models/models_2.ts +++ b/clients/client-ec2/src/models/models_2.ts @@ -36,6 +36,7 @@ import { VpcPeeringConnection, } from "./models_0"; import { + BgpStatus, DiskImageFormat, GroupIdentifier, Ipam, @@ -49,11 +50,49 @@ import { ProtocolValue, SubnetCidrReservation, TransitGateway, - TransitGatewayAttachmentBgpConfiguration, TransitGatewayConnect, VolumeType, } from "./models_1"; +/** + *

          The BGP configuration information.

          + */ +export interface TransitGatewayAttachmentBgpConfiguration { + /** + *

          The transit gateway Autonomous System Number (ASN).

          + */ + TransitGatewayAsn?: number; + + /** + *

          The peer Autonomous System Number (ASN).

          + */ + PeerAsn?: number; + + /** + *

          The interior BGP peer IP address for the transit gateway.

          + */ + TransitGatewayAddress?: string; + + /** + *

          The interior BGP peer IP address for the appliance.

          + */ + PeerAddress?: string; + + /** + *

          The BGP status.

          + */ + BgpStatus?: BgpStatus | string; +} + +export namespace TransitGatewayAttachmentBgpConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TransitGatewayAttachmentBgpConfiguration): any => ({ + ...obj, + }); +} + /** *

          Describes the Connect peer details.

          */ @@ -1695,6 +1734,10 @@ export namespace CreateVpcEndpointServiceConfigurationRequest { }); } +export enum PayerResponsibility { + ServiceOwner = "ServiceOwner", +} + export enum DnsNameState { Failed = "failed", PendingVerification = "pendingVerification", @@ -1839,6 +1882,11 @@ export interface ServiceConfiguration { */ PrivateDnsNameConfiguration?: PrivateDnsNameConfiguration; + /** + *

          The payer responsibility.

          + */ + PayerResponsibility?: PayerResponsibility | string; + /** *

          Any tags assigned to the service.

          */ @@ -2193,7 +2241,7 @@ export interface VpnTunnelOptionsSpecification { /** *

          The number of seconds after which a DPD timeout occurs.

          - *

          Constraints: A value between 0 and 30.

          + *

          Constraints: A value greater than or equal to 30.

          *

          Default: 30 *

          */ @@ -6126,6 +6174,8 @@ export namespace DescribeAccountAttributesResult { /** *

          A filter name and value pair that is used to return a more specific list of results from a describe operation. * Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.

          + *

          If you specify multiple filters, the filters are joined with an AND, and the request returns only + * results that match all of the specified filters.

          */ export interface Filter { /** @@ -6134,7 +6184,9 @@ export interface Filter { Name?: string; /** - *

          The filter values. Filter values are case-sensitive.

          + *

          The filter values. Filter values are case-sensitive. If you specify multiple values for a + * filter, the values are joined with an OR, and the request returns all results + * that match any of the specified values.
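To make the AND/OR semantics described above concrete, here is a small sketch using DescribeInstances; the tag key and values are illustrative only, not taken from this change:

```ts
import { EC2Client, DescribeInstancesCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function findRunningTaggedInstances() {
  // The two filters are ANDed together; the two tag values are ORed within their filter.
  const { Reservations } = await client.send(
    new DescribeInstancesCommand({
      Filters: [
        { Name: "instance-state-name", Values: ["running"] },
        { Name: "tag:Role", Values: ["web", "api"] },
      ],
    })
  );
  return Reservations ?? [];
}
```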

          */ Values?: string[]; } @@ -6986,15 +7038,11 @@ export interface DescribeCapacityReservationsRequest { *
        • *
        • *

          - * availability-zone-id - The Availability Zone ID of the Capacity Reservation.

          - *
        • - *
        • - *

          * instance-platform - The type of operating system for which the Capacity Reservation reserves capacity.

          *
        • *
        • *

          - * availability-zone - The Availability Zone ID of the Capacity Reservation.

          + * availability-zone - The Availability Zone of the Capacity Reservation.

          *
        • *
        • *

          @@ -8556,44 +8604,3 @@ export namespace ImportInstanceTaskDetails { ...obj, }); } - -/** - *

          Describes an import volume task.

          - */ -export interface ImportVolumeTaskDetails { - /** - *

          The Availability Zone where the resulting volume will reside.

          - */ - AvailabilityZone?: string; - - /** - *

          The number of bytes converted so far.

          - */ - BytesConverted?: number; - - /** - *

          The description you provided when starting the import volume task.

          - */ - Description?: string; - - /** - *

          The image.

          - */ - Image?: DiskImageDescription; - - /** - *

          The volume.

          - */ - Volume?: DiskImageVolumeDescription; -} - -export namespace ImportVolumeTaskDetails { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ImportVolumeTaskDetails): any => ({ - ...obj, - }); -} - -export type ConversionTaskState = "active" | "cancelled" | "cancelling" | "completed"; diff --git a/clients/client-ec2/src/models/models_3.ts b/clients/client-ec2/src/models/models_3.ts index 8c4b0f0d502d..75d58582f164 100644 --- a/clients/client-ec2/src/models/models_3.ts +++ b/clients/client-ec2/src/models/models_3.ts @@ -72,15 +72,56 @@ import { TrafficType, } from "./models_1"; import { - ConversionTaskState, + DiskImageDescription, + DiskImageVolumeDescription, Filter, FleetStateCode, IdFormat, ImportInstanceTaskDetails, - ImportVolumeTaskDetails, InstanceTagNotificationAttribute, } from "./models_2"; +/** + *

          Describes an import volume task.

          + */ +export interface ImportVolumeTaskDetails { + /** + *

          The Availability Zone where the resulting volume will reside.

          + */ + AvailabilityZone?: string; + + /** + *

          The number of bytes converted so far.

          + */ + BytesConverted?: number; + + /** + *

          The description you provided when starting the import volume task.

          + */ + Description?: string; + + /** + *

          The image.

          + */ + Image?: DiskImageDescription; + + /** + *

          The volume.

          + */ + Volume?: DiskImageVolumeDescription; +} + +export namespace ImportVolumeTaskDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImportVolumeTaskDetails): any => ({ + ...obj, + }); +} + +export type ConversionTaskState = "active" | "cancelled" | "cancelling" | "completed"; + /** *

          Describes a conversion task.

          */ @@ -1567,7 +1608,8 @@ export interface FleetSpotCapacityRebalance { /** *

          The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot * Instance after launching a new replacement Spot Instance.

          - *

          Valid only when replacementStrategy is set to launch-before-terminate.

          + *

          Required when ReplacementStrategy is set to launch-before-terminate.

          + *

          Not valid when ReplacementStrategy is set to launch.

          *

          Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds.

          */ TerminationDelay?: number; @@ -5647,6 +5689,11 @@ export enum HttpTokensState { required = "required", } +export enum InstanceMetadataTagsState { + disabled = "disabled", + enabled = "enabled", +} + export type InstanceMetadataOptionsState = "applied" | "pending"; /** @@ -5665,8 +5712,7 @@ export interface InstanceMetadataOptionsResponse { State?: InstanceMetadataOptionsState | string; /** - *

          The state of token usage for your instance metadata requests. If the parameter is not - * specified in the request, the default state is optional.

          + *

          The state of token usage for your instance metadata requests.

          *

          If the state is optional, you can choose to retrieve instance metadata * with or without a signed token header on your request. If you retrieve the IAM role * credentials without a token, the version 1.0 role credentials are returned. If you @@ -5676,6 +5722,8 @@ export interface InstanceMetadataOptionsResponse { * instance metadata retrieval requests. In this state, retrieving the IAM role credential * always returns the version 2.0 credentials; the version 1.0 credentials are not * available.

          + *

          Default: optional + *

          */ HttpTokens?: HttpTokensState | string; @@ -5688,7 +5736,10 @@ export interface InstanceMetadataOptionsResponse { HttpPutResponseHopLimit?: number; /** - *

          Indicates whether the HTTP metadata endpoint on your instances is enabled or disabled.

          + *

          Indicates whether the HTTP metadata endpoint on your instances is enabled or + * disabled.

          + *

          If the value is disabled, you cannot access your + * instance metadata.

          */ HttpEndpoint?: InstanceMetadataEndpointState | string; @@ -5696,6 +5747,13 @@ export interface InstanceMetadataOptionsResponse { *

          Indicates whether the IPv6 endpoint for the instance metadata service is enabled or disabled.

          */ HttpProtocolIpv6?: InstanceMetadataProtocolState | string; + + /** + *

          Indicates whether access to instance tags from the instance metadata is enabled or + * disabled. For more information, see Work with + * instance tags using the instance metadata.

          + */ + InstanceMetadataTags?: InstanceMetadataTagsState | string; } export namespace InstanceMetadataOptionsResponse { @@ -8582,6 +8640,27 @@ export interface DescribeLaunchTemplateVersionsRequest { *
        • *
        • *

          + * http-endpoint - Indicates whether the HTTP metadata endpoint on + * your instances is enabled (enabled | disabled).

          + *
        • + *
        • + *

          + * http-protocol-ipv4 - Indicates whether the IPv4 endpoint for the + * instance metadata service is enabled (enabled | + * disabled).

          + *
        • + *
        • + *

          + * host-resource-group-arn - The ARN of the host resource group in + * which to launch the instances.

          + *
        • + *
        • + *

          + * http-tokens - The state of token usage for your instance metadata + * requests (optional | required).

          + *
        • + *
        • + *

          * iam-instance-profile - The ARN of the IAM instance * profile.

          *
        • @@ -8604,6 +8683,15 @@ export interface DescribeLaunchTemplateVersionsRequest { * *
        • *

          + * license-configuration-arn - The ARN of the license + * configuration.

          + *
        • + *
        • + *

          + * network-card-index - The index of the network card.

          + *
        • + *
        • + *

          * ram-disk-id - The RAM disk ID.

          *
        • *
        @@ -10893,6 +10981,10 @@ export interface DescribePlacementGroupsRequest { *
      • *
      • *

        + * group-arn - The Amazon Resource Name (ARN) of the placement group.

        + *
      • + *
      • + *

        * state - The state of the placement group (pending | * available | deleting | * deleted).

        @@ -11941,107 +12033,3 @@ export namespace ReservedInstancesConfiguration { ...obj, }); } - -/** - *

        Describes the modification request/s.

        - */ -export interface ReservedInstancesModificationResult { - /** - *

        The ID for the Reserved Instances that were created as part of the modification request. This field is only available when the modification is fulfilled.

        - */ - ReservedInstancesId?: string; - - /** - *

        The target Reserved Instances configurations supplied as part of the modification request.

        - */ - TargetConfiguration?: ReservedInstancesConfiguration; -} - -export namespace ReservedInstancesModificationResult { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ReservedInstancesModificationResult): any => ({ - ...obj, - }); -} - -/** - *

        Describes the ID of a Reserved Instance.

        - */ -export interface ReservedInstancesId { - /** - *

        The ID of the Reserved Instance.

        - */ - ReservedInstancesId?: string; -} - -export namespace ReservedInstancesId { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ReservedInstancesId): any => ({ - ...obj, - }); -} - -/** - *

        Describes a Reserved Instance modification.

        - */ -export interface ReservedInstancesModification { - /** - *

        A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. - * For more information, see Ensuring - * Idempotency.

        - */ - ClientToken?: string; - - /** - *

        The time when the modification request was created.

        - */ - CreateDate?: Date; - - /** - *

        The time for the modification to become effective.

        - */ - EffectiveDate?: Date; - - /** - *

        Contains target configurations along with their corresponding new Reserved Instance IDs.

        - */ - ModificationResults?: ReservedInstancesModificationResult[]; - - /** - *

        The IDs of one or more Reserved Instances.

        - */ - ReservedInstancesIds?: ReservedInstancesId[]; - - /** - *

        A unique ID for the Reserved Instance modification.

        - */ - ReservedInstancesModificationId?: string; - - /** - *

        The status of the Reserved Instances modification request.

        - */ - Status?: string; - - /** - *

        The reason for the status.

        - */ - StatusMessage?: string; - - /** - *

        The time when the modification request was last updated.

        - */ - UpdateDate?: Date; -} - -export namespace ReservedInstancesModification { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ReservedInstancesModification): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_4.ts b/clients/client-ec2/src/models/models_4.ts index fd0ba2fd07e9..f26ae106d0ff 100644 --- a/clients/client-ec2/src/models/models_4.ts +++ b/clients/client-ec2/src/models/models_4.ts @@ -44,7 +44,6 @@ import { InstanceIpv6Address, InstanceRequirements, InstanceRequirementsRequest, - IpamResourceTag, Ipv4PrefixSpecificationRequest, Ipv6PrefixSpecificationRequest, PrivateIpAddressSpecification, @@ -70,6 +69,7 @@ import { DnsNameState, Filter, IpamPoolCidr, + PayerResponsibility, ServiceConfiguration, ServiceTypeDetail, State, @@ -93,12 +93,116 @@ import { PermissionGroup, ProductCode, RecurringCharge, - ReservedInstancesModification, + ReservedInstancesConfiguration, RIProductDescription, Scope, VirtualizationType, } from "./models_3"; +/** + *

        Describes the modification request/s.
        Describes the modification request or requests.

        + */ +export interface ReservedInstancesModificationResult { + /** + *

        The ID for the Reserved Instances that were created as part of the modification request. This field is only available when the modification is fulfilled.

        + */ + ReservedInstancesId?: string; + + /** + *

        The target Reserved Instances configurations supplied as part of the modification request.

        + */ + TargetConfiguration?: ReservedInstancesConfiguration; +} + +export namespace ReservedInstancesModificationResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ReservedInstancesModificationResult): any => ({ + ...obj, + }); +} + +/** + *

        Describes the ID of a Reserved Instance.

        + */ +export interface ReservedInstancesId { + /** + *

        The ID of the Reserved Instance.

        + */ + ReservedInstancesId?: string; +} + +export namespace ReservedInstancesId { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ReservedInstancesId): any => ({ + ...obj, + }); +} + +/** + *

        Describes a Reserved Instance modification.

        + */ +export interface ReservedInstancesModification { + /** + *

        A unique, case-sensitive key supplied by the client to ensure that the request is idempotent. + * For more information, see Ensuring + * Idempotency.

        + */ + ClientToken?: string; + + /** + *

        The time when the modification request was created.

        + */ + CreateDate?: Date; + + /** + *

        The time for the modification to become effective.

        + */ + EffectiveDate?: Date; + + /** + *

        Contains target configurations along with their corresponding new Reserved Instance IDs.

        + */ + ModificationResults?: ReservedInstancesModificationResult[]; + + /** + *

        The IDs of one or more Reserved Instances.

        + */ + ReservedInstancesIds?: ReservedInstancesId[]; + + /** + *

        A unique ID for the Reserved Instance modification.

        + */ + ReservedInstancesModificationId?: string; + + /** + *

        The status of the Reserved Instances modification request.

        + */ + Status?: string; + + /** + *

        The reason for the status.

        + */ + StatusMessage?: string; + + /** + *

        The time when the modification request was last updated.

        + */ + UpdateDate?: Date; +} + +export namespace ReservedInstancesModification { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ReservedInstancesModification): any => ({ + ...obj, + }); +} + /** *

        Contains the output of DescribeReservedInstancesModifications.

        */ @@ -1727,8 +1831,9 @@ export interface DescribeSnapshotTierStatusRequest { *
      • *
      • *

        - * last-tiering-operation - The state of the last archive or restore action. (archiving | archival_error | - * archival_complete | restoring | restore_error | restore_complete)

        + * last-tiering-operation - The state of the last archive or restore action. (archival-in-progress | archival-completed | + * archival-failed | permanent-restore-in-progress | permanent-restore-completed | permanent-restore-failed | + * temporary-restore-in-progress | temporary-restore-completed | temporary-restore-failed)

        *
      • *
      */ @@ -2748,7 +2853,7 @@ export enum ReplacementStrategy { /** *

      The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your * Spot Instance is at an elevated risk of being interrupted. For more information, see - * Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

      + * Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

      */ export interface SpotCapacityRebalance { /** @@ -2771,7 +2876,8 @@ export interface SpotCapacityRebalance { /** *

      The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot * Instance after launching a new replacement Spot Instance.

      - *

      Valid only when ReplacementStrategy is set to launch-before-terminate.

      + *

      Required when ReplacementStrategy is set to launch-before-terminate.

      + *

      Not valid when ReplacementStrategy is set to launch.

      *

      Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds.
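A minimal sketch of a maintenance-strategies object that respects these constraints, built from the SpotMaintenanceStrategies and SpotCapacityRebalance shapes relocated in this change; 120 seconds is simply the documented minimum:

```ts
import type { SpotMaintenanceStrategies } from "@aws-sdk/client-ec2";

// TerminationDelay is required for "launch-before-terminate" (120-7200 seconds)
// and must be omitted when ReplacementStrategy is plain "launch".
const maintenanceStrategies: SpotMaintenanceStrategies = {
  CapacityRebalance: {
    ReplacementStrategy: "launch-before-terminate",
    TerminationDelay: 120,
  },
};
```

This object would typically be passed as SpotFleetRequestConfig.SpotMaintenanceStrategies on a RequestSpotFleet call.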

      */ TerminationDelay?: number; @@ -2792,8 +2898,9 @@ export namespace SpotCapacityRebalance { */ export interface SpotMaintenanceStrategies { /** - *

      The strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an - * elevated risk of being interrupted.

      + *

      The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your + * Spot Instance is at an elevated risk of being interrupted. For more information, see + * Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

      */ CapacityRebalance?: SpotCapacityRebalance; } @@ -7065,6 +7172,11 @@ export interface ServiceDetail { */ ManagesVpcEndpoints?: boolean; + /** + *

      The payer responsibility.

      + */ + PayerResponsibility?: PayerResponsibility | string; + /** *

      Any tags assigned to the service.

      */ @@ -11047,137 +11159,3 @@ export enum IpamManagementState { managed = "managed", unmanaged = "unmanaged", } - -/** - *

      The CIDR for an IPAM resource.

      - */ -export interface IpamResourceCidr { - /** - *

      The IPAM ID for an IPAM resource.

      - */ - IpamId?: string; - - /** - *

      The scope ID for an IPAM resource.

      - */ - IpamScopeId?: string; - - /** - *

      The pool ID for an IPAM resource.

      - */ - IpamPoolId?: string; - - /** - *

      The Amazon Web Services Region for an IPAM resource.

      - */ - ResourceRegion?: string; - - /** - *

      The Amazon Web Services account number of the owner of an IPAM resource.

      - */ - ResourceOwnerId?: string; - - /** - *

      The ID of an IPAM resource.

      - */ - ResourceId?: string; - - /** - *

      The name of an IPAM resource.

      - */ - ResourceName?: string; - - /** - *

      The CIDR for an IPAM resource.

      - */ - ResourceCidr?: string; - - /** - *

      The type of IPAM resource.

      - */ - ResourceType?: IpamResourceType | string; - - /** - *

      The tags for an IPAM resource.

      - */ - ResourceTags?: IpamResourceTag[]; - - /** - *

      The IP address space in the IPAM pool that is allocated to this resource. To convert the decimal to a percentage, multiply the decimal by 100.

      - */ - IpUsage?: number; - - /** - *

      The compliance status of the IPAM resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.

      - */ - ComplianceStatus?: IpamComplianceStatus | string; - - /** - *

      The management state of the resource. For more information about management states, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.

      - */ - ManagementState?: IpamManagementState | string; - - /** - *

      The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.

      - */ - OverlapStatus?: IpamOverlapStatus | string; - - /** - *

      The ID of a VPC.

      - */ - VpcId?: string; -} - -export namespace IpamResourceCidr { - /** - * @internal - */ - export const filterSensitiveLog = (obj: IpamResourceCidr): any => ({ - ...obj, - }); -} - -export interface GetIpamResourceCidrsResult { - /** - *

      The token to use to retrieve the next page of results. This value is null when there are no more results to return.

      - */ - NextToken?: string; - - /** - *

      The resource CIDRs.

      - */ - IpamResourceCidrs?: IpamResourceCidr[]; -} - -export namespace GetIpamResourceCidrsResult { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetIpamResourceCidrsResult): any => ({ - ...obj, - }); -} - -export interface GetLaunchTemplateDataRequest { - /** - *

      Checks whether you have the required permissions for the action, without actually - * making the request, and provides an error response. If you have the required - * permissions, the error response is DryRunOperation. Otherwise, it is - * UnauthorizedOperation.

      - */ - DryRun?: boolean; - - /** - *

      The ID of the instance.

      - */ - InstanceId: string | undefined; -} - -export namespace GetLaunchTemplateDataRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetLaunchTemplateDataRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_5.ts b/clients/client-ec2/src/models/models_5.ts index 07977de1c987..97255e814c33 100644 --- a/clients/client-ec2/src/models/models_5.ts +++ b/clients/client-ec2/src/models/models_5.ts @@ -55,10 +55,10 @@ import { InstanceRequirementsRequest, Ipam, IpamPool, + IpamResourceTag, IpamScope, LaunchTemplate, ManagedPrefixList, - MarketType, NetworkInsightsAccessScopeContent, Placement, PlatformValues, @@ -87,6 +87,7 @@ import { IKEVersionsRequestListValue, InstanceTagNotificationAttribute, IpamPoolCidr, + PayerResponsibility, Phase1DHGroupNumbersRequestListValue, Phase1EncryptionAlgorithmsRequestListValue, Phase1IntegrityAlgorithmsRequestListValue, @@ -112,6 +113,7 @@ import { InstanceMetadataEndpointState, InstanceMetadataOptionsResponse, InstanceMetadataProtocolState, + InstanceMetadataTagsState, InstanceStatusEvent, LaunchPermission, Monitoring, @@ -127,7 +129,10 @@ import { ExcessCapacityTerminationPolicy, InstanceFamilyCreditSpecification, InstanceNetworkInterfaceSpecification, - IpamResourceCidr, + IpamComplianceStatus, + IpamManagementState, + IpamOverlapStatus, + IpamResourceType, LaunchTemplateConfig, Purchase, RunInstancesMonitoringEnabled, @@ -141,6 +146,140 @@ import { VolumeModification, } from "./models_4"; +/** + *

      The CIDR for an IPAM resource.

      + */ +export interface IpamResourceCidr { + /** + *

      The IPAM ID for an IPAM resource.

      + */ + IpamId?: string; + + /** + *

      The scope ID for an IPAM resource.

      + */ + IpamScopeId?: string; + + /** + *

      The pool ID for an IPAM resource.

      + */ + IpamPoolId?: string; + + /** + *

      The Amazon Web Services Region for an IPAM resource.

      + */ + ResourceRegion?: string; + + /** + *

      The Amazon Web Services account number of the owner of an IPAM resource.

      + */ + ResourceOwnerId?: string; + + /** + *

      The ID of an IPAM resource.

      + */ + ResourceId?: string; + + /** + *

      The name of an IPAM resource.

      + */ + ResourceName?: string; + + /** + *

      The CIDR for an IPAM resource.

      + */ + ResourceCidr?: string; + + /** + *

      The type of IPAM resource.

      + */ + ResourceType?: IpamResourceType | string; + + /** + *

      The tags for an IPAM resource.

      + */ + ResourceTags?: IpamResourceTag[]; + + /** + *

      The IP address space in the IPAM pool that is allocated to this resource. To convert the decimal to a percentage, multiply the decimal by 100.

      + */ + IpUsage?: number; + + /** + *

      The compliance status of the IPAM resource. For more information on compliance statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.

      + */ + ComplianceStatus?: IpamComplianceStatus | string; + + /** + *

      The management state of the resource. For more information about management states, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.

      + */ + ManagementState?: IpamManagementState | string; + + /** + *

      The overlap status of an IPAM resource. The overlap status tells you if the CIDR for a resource overlaps with another CIDR in the scope. For more information on overlap statuses, see Monitor CIDR usage by resource in the Amazon VPC IPAM User Guide.

      + */ + OverlapStatus?: IpamOverlapStatus | string; + + /** + *

      The ID of a VPC.

      + */ + VpcId?: string; +} + +export namespace IpamResourceCidr { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IpamResourceCidr): any => ({ + ...obj, + }); +} + +export interface GetIpamResourceCidrsResult { + /** + *

      The token to use to retrieve the next page of results. This value is null when there are no more results to return.

      + */ + NextToken?: string; + + /** + *

      The resource CIDRs.

      + */ + IpamResourceCidrs?: IpamResourceCidr[]; +} + +export namespace GetIpamResourceCidrsResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetIpamResourceCidrsResult): any => ({ + ...obj, + }); +} + +export interface GetLaunchTemplateDataRequest { + /** + *

      Checks whether you have the required permissions for the action, without actually + * making the request, and provides an error response. If you have the required + * permissions, the error response is DryRunOperation. Otherwise, it is + * UnauthorizedOperation.

      + */ + DryRun?: boolean; + + /** + *

      The ID of the instance.

      + */ + InstanceId: string | undefined; +} + +export namespace GetLaunchTemplateDataRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetLaunchTemplateDataRequest): any => ({ + ...obj, + }); +} + export interface GetLaunchTemplateDataResult { /** *

      The instance data.

      @@ -1818,7 +1957,7 @@ export namespace ImportImageLicenseConfigurationRequest { export interface ImportImageRequest { /** *

      The architecture of the virtual machine.

      - *

      Valid values: i386 | x86_64 | arm64 + *

      Valid values: i386 | x86_64 *

      */ Architecture?: string; @@ -3454,12 +3593,12 @@ export namespace ModifyIdFormatRequest { */ export interface LaunchPermissionModifications { /** - *

      The Amazon Web Services account ID to add to the list of launch permissions for the AMI.

      + *

      The Amazon Web Services account ID, organization ARN, or OU ARN to add to the list of launch permissions for the AMI.

      */ Add?: LaunchPermission[]; /** - *

      The Amazon Web Services account ID to remove from the list of launch permissions for the AMI.

      + *

      The Amazon Web Services account ID, organization ARN, or OU ARN to remove from the list of launch permissions for the AMI.
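A hedged sketch of sharing an AMI with an organization under the broadened wording above, assuming the LaunchPermission shape exposes an OrganizationArn member as that wording implies; the AMI ID and organization ARN are placeholders:

```ts
import { EC2Client, ModifyImageAttributeCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function shareAmiWithOrganization() {
  // Both identifiers below are placeholders, not values from this change.
  await client.send(
    new ModifyImageAttributeCommand({
      ImageId: "ami-0123456789abcdef0",
      Attribute: "launchPermission",
      LaunchPermission: {
        Add: [{ OrganizationArn: "arn:aws:organizations::123456789012:organization/o-example" }],
      },
    })
  );
}
```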

      */ Remove?: LaunchPermission[]; } @@ -4160,7 +4299,7 @@ export interface ModifyInstanceMetadataOptionsRequest { /** *

      Enables or disables the HTTP metadata endpoint on your instances. If - * the parameter is not specified, the existing state is maintained.

      + * this parameter is not specified, the existing state is maintained.

      *

      If you specify a value of disabled, you cannot access your * instance metadata.

      */ @@ -4178,6 +4317,16 @@ export interface ModifyInstanceMetadataOptionsRequest { * applies only if you have enabled the HTTP metadata endpoint.

      */ HttpProtocolIpv6?: InstanceMetadataProtocolState | string; + + /** + *

      Set to enabled to allow access to instance tags from the instance + * metadata. Set to disabled to turn off access to instance tags from the + * instance metadata. For more information, see Work with + * instance tags using the instance metadata.

      + *

      Default: disabled + *
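A short sketch of enabling the new setting on an existing instance via ModifyInstanceMetadataOptions; the instance ID is supplied by the caller:

```ts
import { EC2Client, ModifyInstanceMetadataOptionsCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function enableInstanceMetadataTags(instanceId: string) {
  // Allow the instance to read its own tags through the instance metadata service.
  const { InstanceMetadataOptions } = await client.send(
    new ModifyInstanceMetadataOptionsCommand({
      InstanceId: instanceId,
      InstanceMetadataTags: "enabled",
    })
  );
  return InstanceMetadataOptions;
}
```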

      + */ + InstanceMetadataTags?: InstanceMetadataTagsState | string; } export namespace ModifyInstanceMetadataOptionsRequest { @@ -6168,6 +6317,52 @@ export namespace ModifyVpcEndpointServiceConfigurationResult { }); } +export interface ModifyVpcEndpointServicePayerResponsibilityRequest { + /** + *

      Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

      + */ + DryRun?: boolean; + + /** + *

      The ID of the service.

      + */ + ServiceId: string | undefined; + + /** + *

      The entity that is responsible for the endpoint costs. The default is the endpoint owner. + * If you set the payer responsibility to the service owner, you cannot set it back to the + * endpoint owner.
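A rough usage sketch, assuming the ModifyVpcEndpointServicePayerResponsibilityCommand class added elsewhere in this change is exported from @aws-sdk/client-ec2 alongside the request and result shapes shown here:

```ts
import {
  EC2Client,
  ModifyVpcEndpointServicePayerResponsibilityCommand,
} from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function makeServiceOwnerPay(serviceId: string) {
  // Per the doc text above, this cannot be reverted back to the endpoint owner.
  const { ReturnValue } = await client.send(
    new ModifyVpcEndpointServicePayerResponsibilityCommand({
      ServiceId: serviceId,
      PayerResponsibility: "ServiceOwner",
    })
  );
  return ReturnValue === true;
}
```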

      + */ + PayerResponsibility: PayerResponsibility | string | undefined; +} + +export namespace ModifyVpcEndpointServicePayerResponsibilityRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifyVpcEndpointServicePayerResponsibilityRequest): any => ({ + ...obj, + }); +} + +export interface ModifyVpcEndpointServicePayerResponsibilityResult { + /** + *

      Returns true if the request succeeds; otherwise, it returns an error.

      + */ + ReturnValue?: boolean; +} + +export namespace ModifyVpcEndpointServicePayerResponsibilityResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifyVpcEndpointServicePayerResponsibilityResult): any => ({ + ...obj, + }); +} + export interface ModifyVpcEndpointServicePermissionsRequest { /** *

      Checks whether you have the required permissions for the action, without actually making the request, @@ -6662,7 +6857,7 @@ export interface ModifyVpnTunnelOptionsSpecification { /** *

      The number of seconds after which a DPD timeout occurs.

      - *

      Constraints: A value between 0 and 30.

      + *

      Constraints: A value greater than or equal to 30.

      *

      Default: 30 *
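A small sketch that stays within the tightened constraint (a value of at least 30 seconds); the connection ID and outside IP address are caller-supplied placeholders:

```ts
import { EC2Client, ModifyVpnTunnelOptionsCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function raiseDpdTimeout(vpnConnectionId: string, tunnelOutsideIp: string) {
  // 45 is an arbitrary example; anything below 30 now violates the documented constraint.
  await client.send(
    new ModifyVpnTunnelOptionsCommand({
      VpnConnectionId: vpnConnectionId,
      VpnTunnelOutsideIpAddress: tunnelOutsideIp,
      TunnelOptions: { DPDTimeoutSeconds: 45 },
    })
  );
}
```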

      */ @@ -6938,17 +7133,17 @@ export interface MoveByoipCidrToIpamRequest { /** *

      The BYOIP CIDR.

      */ - Cidr?: string; + Cidr: string | undefined; /** *

      The IPAM pool ID.

      */ - IpamPoolId?: string; + IpamPoolId: string | undefined; /** *

      The Amazon Web Services account ID of the owner of the IPAM pool.
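Because all three parameters become required in this change, a call now has to supply each of them; the CIDR, pool ID, and account ID below are placeholders:

```ts
import { EC2Client, MoveByoipCidrToIpamCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function moveByoipCidrToIpam() {
  // Cidr, IpamPoolId, and IpamPoolOwner are all required; these values are examples only.
  await client.send(
    new MoveByoipCidrToIpamCommand({
      Cidr: "203.0.113.0/24",
      IpamPoolId: "ipam-pool-0123456789abcdef0",
      IpamPoolOwner: "123456789012",
    })
  );
}
```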

      */ - IpamPoolOwner?: string; + IpamPoolOwner: string | undefined; } export namespace MoveByoipCidrToIpamRequest { @@ -9667,161 +9862,3 @@ export namespace CpuOptionsRequest { ...obj, }); } - -/** - *

      - * Describes an elastic inference accelerator. - *

      - */ -export interface ElasticInferenceAccelerator { - /** - *

      - * The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, eia1.xlarge, eia2.medium, eia2.large, and eia2.xlarge. - *

      - */ - Type: string | undefined; - - /** - *

      - * The number of elastic inference accelerators to attach to the instance. - *

      - *

      Default: 1

      - */ - Count?: number; -} - -export namespace ElasticInferenceAccelerator { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ElasticInferenceAccelerator): any => ({ - ...obj, - }); -} - -/** - *

      Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, - * see - * What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

      - */ -export interface EnclaveOptionsRequest { - /** - *

      To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter to true.

      - */ - Enabled?: boolean; -} - -export namespace EnclaveOptionsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: EnclaveOptionsRequest): any => ({ - ...obj, - }); -} - -/** - *

      Indicates whether your instance is configured for hibernation. This parameter is valid - * only if the instance meets the hibernation - * prerequisites. For - * more information, see Hibernate your instance in the - * Amazon EC2 User Guide.

      - */ -export interface HibernationOptionsRequest { - /** - *

      If you set this parameter to true, your instance is enabled for - * hibernation.

      - *

      Default: false - *

      - */ - Configured?: boolean; -} - -export namespace HibernationOptionsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: HibernationOptionsRequest): any => ({ - ...obj, - }); -} - -/** - *

      The options for Spot Instances.

      - */ -export interface SpotMarketOptions { - /** - *

      The maximum hourly price you're willing to pay for the Spot Instances. The default is - * the On-Demand price.

      - */ - MaxPrice?: string; - - /** - *

      The Spot Instance request type. For RunInstances, persistent Spot - * Instance requests are only supported when the instance interruption behavior is either hibernate or - * stop.

      - */ - SpotInstanceType?: SpotInstanceType | string; - - /** - *

      Deprecated.

      - */ - BlockDurationMinutes?: number; - - /** - *

      The end date of the request, in UTC format - * (YYYY-MM-DDTHH:MM:SSZ). - * Supported only for persistent requests.

      - *
        - *
      • - *

        For a persistent request, the request remains active until the - * ValidUntil date and time is reached. Otherwise, the request - * remains active until you cancel it.

        - *
      • - *
      • - *

        For a one-time request, ValidUntil is not supported. The request - * remains active until all instances launch or you cancel the request.

        - *
      • - *
      - */ - ValidUntil?: Date; - - /** - *

      The behavior when a Spot Instance is interrupted. The default is - * terminate.

      - */ - InstanceInterruptionBehavior?: InstanceInterruptionBehavior | string; -} - -export namespace SpotMarketOptions { - /** - * @internal - */ - export const filterSensitiveLog = (obj: SpotMarketOptions): any => ({ - ...obj, - }); -} - -/** - *

      Describes the market (purchasing) option for the instances.

      - */ -export interface InstanceMarketOptionsRequest { - /** - *

      The market type.

      - */ - MarketType?: MarketType | string; - - /** - *

      The options for Spot Instances.

      - */ - SpotOptions?: SpotMarketOptions; -} - -export namespace InstanceMarketOptionsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: InstanceMarketOptionsRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_6.ts b/clients/client-ec2/src/models/models_6.ts index e655e8951b35..ea066247c5cf 100644 --- a/clients/client-ec2/src/models/models_6.ts +++ b/clients/client-ec2/src/models/models_6.ts @@ -11,30 +11,184 @@ import { BlockDeviceMapping, CreditSpecificationRequest, ElasticGpuSpecification, + InstanceInterruptionBehavior, InstanceIpv6Address, LocalGatewayRoute, + MarketType, Placement, ShutdownBehavior, + SpotInstanceType, } from "./models_1"; import { ClientVpnConnectionStatus, Filter, TransitGatewayRoute } from "./models_2"; import { HttpTokensState, InstanceMetadataEndpointState, InstanceMetadataProtocolState, + InstanceMetadataTagsState, InstanceState, NetworkInsightsAccessScopeAnalysis, NetworkInsightsAnalysis, } from "./models_3"; import { InstanceNetworkInterfaceSpecification, RunInstancesMonitoringEnabled } from "./models_4"; -import { - CapacityReservationSpecification, - CpuOptionsRequest, - ElasticInferenceAccelerator, - EnclaveOptionsRequest, - HibernationOptionsRequest, - InstanceMarketOptionsRequest, - InstanceMonitoring, -} from "./models_5"; +import { CapacityReservationSpecification, CpuOptionsRequest, InstanceMonitoring } from "./models_5"; + +/** + *

      + * Describes an elastic inference accelerator. + *

      + */ +export interface ElasticInferenceAccelerator { + /** + *

      + * The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, eia1.xlarge, eia2.medium, eia2.large, and eia2.xlarge. + *

      + */ + Type: string | undefined; + + /** + *

      + * The number of elastic inference accelerators to attach to the instance. + *

      + *

      Default: 1

      + */ + Count?: number; +} + +export namespace ElasticInferenceAccelerator { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ElasticInferenceAccelerator): any => ({ + ...obj, + }); +} + +/** + *

      Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves. For more information, + * see + * What is Amazon Web Services Nitro Enclaves? in the Amazon Web Services Nitro Enclaves User Guide.

      + */ +export interface EnclaveOptionsRequest { + /** + *

      To enable the instance for Amazon Web Services Nitro Enclaves, set this parameter to true.

      + */ + Enabled?: boolean; +} + +export namespace EnclaveOptionsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnclaveOptionsRequest): any => ({ + ...obj, + }); +} + +/** + *

      Indicates whether your instance is configured for hibernation. This parameter is valid + * only if the instance meets the hibernation + * prerequisites. For + * more information, see Hibernate your instance in the + * Amazon EC2 User Guide.

      + */ +export interface HibernationOptionsRequest { + /** + *

      If you set this parameter to true, your instance is enabled for + * hibernation.

      + *

      Default: false + *

      + */ + Configured?: boolean; +} + +export namespace HibernationOptionsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: HibernationOptionsRequest): any => ({ + ...obj, + }); +} + +/** + *

      The options for Spot Instances.

      + */ +export interface SpotMarketOptions { + /** + *

      The maximum hourly price you're willing to pay for the Spot Instances. The default is + * the On-Demand price.

      + */ + MaxPrice?: string; + + /** + *

      The Spot Instance request type. For RunInstances, persistent Spot + * Instance requests are only supported when the instance interruption behavior is either hibernate or + * stop.

      + */ + SpotInstanceType?: SpotInstanceType | string; + + /** + *

      Deprecated.

      + */ + BlockDurationMinutes?: number; + + /** + *

      The end date of the request, in UTC format + * (YYYY-MM-DDTHH:MM:SSZ). + * Supported only for persistent requests.

      + *
        + *
      • + *

        For a persistent request, the request remains active until the + * ValidUntil date and time is reached. Otherwise, the request + * remains active until you cancel it.

        + *
      • + *
      • + *

        For a one-time request, ValidUntil is not supported. The request + * remains active until all instances launch or you cancel the request.

        + *
      • + *
      + */ + ValidUntil?: Date; + + /** + *

      The behavior when a Spot Instance is interrupted. The default is + * terminate.

      + */ + InstanceInterruptionBehavior?: InstanceInterruptionBehavior | string; +} + +export namespace SpotMarketOptions { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SpotMarketOptions): any => ({ + ...obj, + }); +} + +/** + *

      Describes the market (purchasing) option for the instances.

      + */ +export interface InstanceMarketOptionsRequest { + /** + *

      The market type.

      + */ + MarketType?: MarketType | string; + + /** + *

      The options for Spot Instances.

      + */ + SpotOptions?: SpotMarketOptions; +} + +export namespace InstanceMarketOptionsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InstanceMarketOptionsRequest): any => ({ + ...obj, + }); +} /** *

      The launch template to use. You must specify either the launch template ID or @@ -91,8 +245,7 @@ export namespace LicenseConfigurationRequest { */ export interface InstanceMetadataOptionsRequest { /** - *

      The state of token usage for your instance metadata requests. If the parameter is not - * specified in the request, the default state is optional.

      + *

      The state of token usage for your instance metadata requests.

      *

      If the state is optional, you can choose to retrieve instance metadata * with or without a signed token header on your request. If you retrieve the IAM role * credentials without a token, the version 1.0 role credentials are returned. If you @@ -102,6 +255,8 @@ export interface InstanceMetadataOptionsRequest { * instance metadata retrieval requests. In this state, retrieving the IAM role credentials * always returns the version 2.0 credentials; the version 1.0 credentials are not * available.

      + *

      Default: optional + *

      */ HttpTokens?: HttpTokensState | string; @@ -114,10 +269,11 @@ export interface InstanceMetadataOptionsRequest { HttpPutResponseHopLimit?: number; /** - *

      Enables or disables the HTTP metadata endpoint on your instances. If the parameter is not - * specified, the default state is enabled.

      - *

      If you specify a value of disabled, you will not be able to access your + *

      Enables or disables the HTTP metadata endpoint on your instances.

      + *

      If you specify a value of disabled, you cannot access your * instance metadata.

      + *

      Default: enabled + *

      */ HttpEndpoint?: InstanceMetadataEndpointState | string; @@ -125,6 +281,14 @@ export interface InstanceMetadataOptionsRequest { *

      Enables or disables the IPv6 endpoint for the instance metadata service.

      */ HttpProtocolIpv6?: InstanceMetadataProtocolState | string; + + /** + *

      Set to enabled to allow access to instance tags from the instance metadata. Set to disabled to turn off + * access to instance tags from the instance metadata. For more information, see Work with instance tags using the instance metadata.

      + *

      Default: disabled + *
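A launch-time sketch that combines the new tag-access setting with IMDSv2 enforcement; the AMI ID is a placeholder and the instance type is arbitrary:

```ts
import { EC2Client, RunInstancesCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function launchWithMetadataTags() {
  return client.send(
    new RunInstancesCommand({
      ImageId: "ami-0123456789abcdef0",
      InstanceType: "t3.micro",
      MinCount: 1,
      MaxCount: 1,
      MetadataOptions: {
        HttpTokens: "required", // require IMDSv2 session tokens
        InstanceMetadataTags: "enabled", // expose instance tags via the metadata service
      },
    })
  );
}
```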

      + */ + InstanceMetadataTags?: InstanceMetadataTagsState | string; } export namespace InstanceMetadataOptionsRequest { diff --git a/clients/client-ec2/src/protocols/Aws_ec2.ts b/clients/client-ec2/src/protocols/Aws_ec2.ts index 0d6af76e9d95..198af71b7346 100644 --- a/clients/client-ec2/src/protocols/Aws_ec2.ts +++ b/clients/client-ec2/src/protocols/Aws_ec2.ts @@ -1503,6 +1503,10 @@ import { ModifyVpcEndpointServiceConfigurationCommandInput, ModifyVpcEndpointServiceConfigurationCommandOutput, } from "../commands/ModifyVpcEndpointServiceConfigurationCommand"; +import { + ModifyVpcEndpointServicePayerResponsibilityCommandInput, + ModifyVpcEndpointServicePayerResponsibilityCommandOutput, +} from "../commands/ModifyVpcEndpointServicePayerResponsibilityCommand"; import { ModifyVpcEndpointServicePermissionsCommandInput, ModifyVpcEndpointServicePermissionsCommandOutput, @@ -2202,7 +2206,6 @@ import { TrafficMirrorSession, TrafficMirrorTarget, TransitGateway, - TransitGatewayAttachmentBgpConfiguration, TransitGatewayConnect, TransitGatewayConnectOptions, TransitGatewayConnectRequestBgpOptions, @@ -2433,7 +2436,6 @@ import { IKEVersionsRequestListValue, ImportInstanceTaskDetails, ImportInstanceVolumeDetailItem, - ImportVolumeTaskDetails, InstanceEventWindowStateChange, InstanceTagNotificationAttribute, IpamPoolCidr, @@ -2458,6 +2460,7 @@ import { ServiceTypeDetail, SuccessfulQueuedPurchaseDeletion, TargetNetwork, + TransitGatewayAttachmentBgpConfiguration, TransitGatewayConnectPeer, TransitGatewayConnectPeerConfiguration, TransitGatewayMulticastDeregisteredGroupMembers, @@ -2663,6 +2666,7 @@ import { ImportImageLicenseConfigurationResponse, ImportImageTask, ImportSnapshotTask, + ImportVolumeTaskDetails, InferenceAcceleratorInfo, InferenceDeviceInfo, Instance, @@ -2719,9 +2723,6 @@ import { Reservation, ReservedInstances, ReservedInstancesConfiguration, - ReservedInstancesId, - ReservedInstancesModification, - ReservedInstancesModificationResult, RootDeviceType, SnapshotDetail, SnapshotTaskDetail, @@ -2953,8 +2954,6 @@ import { GetIpamPoolCidrsRequest, GetIpamPoolCidrsResult, GetIpamResourceCidrsRequest, - GetIpamResourceCidrsResult, - GetLaunchTemplateDataRequest, HistoryRecord, InstanceEventWindowDisassociationRequest, InstanceFamilyCreditSpecification, @@ -2963,7 +2962,6 @@ import { InstanceUsage, IntegrateServices, IpamAddressHistoryRecord, - IpamResourceCidr, Ipv6CidrAssociation, LaunchSpecification, LaunchTemplateConfig, @@ -2972,6 +2970,9 @@ import { PricingDetail, PrivateDnsDetails, Purchase, + ReservedInstancesId, + ReservedInstancesModification, + ReservedInstancesModificationResult, ReservedInstancesOffering, RunInstancesMonitoringEnabled, ScheduledInstance, @@ -3025,8 +3026,8 @@ import { DiskImageDetail, DnsServersOptionsModifyStructure, EbsInstanceBlockDeviceSpecification, - ElasticInferenceAccelerator, - EnclaveOptionsRequest, + GetIpamResourceCidrsResult, + GetLaunchTemplateDataRequest, GetLaunchTemplateDataResult, GetManagedPrefixListAssociationsRequest, GetManagedPrefixListAssociationsResult, @@ -3060,7 +3061,6 @@ import { GetVpnConnectionDeviceSampleConfigurationResult, GetVpnConnectionDeviceTypesRequest, GetVpnConnectionDeviceTypesResult, - HibernationOptionsRequest, ImageDiskContainer, ImportClientVpnClientCertificateRevocationListRequest, ImportClientVpnClientCertificateRevocationListResult, @@ -3078,10 +3078,10 @@ import { ImportVolumeResult, InstanceBlockDeviceMappingSpecification, InstanceCreditSpecificationRequest, - InstanceMarketOptionsRequest, InstanceMonitoring, 
InstanceRequirementsWithMetadataRequest, IpamCidrAuthorizationContext, + IpamResourceCidr, LaunchPermissionModifications, ListSnapshotsInRecycleBinRequest, ListSnapshotsInRecycleBinResult, @@ -3172,6 +3172,8 @@ import { ModifyVpcEndpointResult, ModifyVpcEndpointServiceConfigurationRequest, ModifyVpcEndpointServiceConfigurationResult, + ModifyVpcEndpointServicePayerResponsibilityRequest, + ModifyVpcEndpointServicePayerResponsibilityResult, ModifyVpcEndpointServicePermissionsRequest, ModifyVpcEndpointServicePermissionsResult, ModifyVpcPeeringConnectionOptionsRequest, @@ -3286,7 +3288,6 @@ import { SecurityGroupRuleUpdate, SnapshotDiskContainer, SnapshotRecycleBinInfo, - SpotMarketOptions, SpotPlacementScore, SuccessfulInstanceCreditSpecificationItem, TargetConfiguration, @@ -3307,6 +3308,10 @@ import { VpnConnectionDeviceType, } from "../models/models_5"; import { + ElasticInferenceAccelerator, + EnclaveOptionsRequest, + HibernationOptionsRequest, + InstanceMarketOptionsRequest, InstanceMetadataOptionsRequest, InstanceStateChange, LaunchTemplateSpecification, @@ -3332,6 +3337,7 @@ import { SearchTransitGatewayRoutesResult, SecurityGroupRuleDescription, SendDiagnosticInterruptRequest, + SpotMarketOptions, StartInstancesRequest, StartInstancesResult, StartNetworkInsightsAccessScopeAnalysisRequest, @@ -10465,6 +10471,22 @@ export const serializeAws_ec2ModifyVpcEndpointServiceConfigurationCommand = asyn return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityCommand = async ( + input: ModifyVpcEndpointServicePayerResponsibilityCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-www-form-urlencoded", + }; + let body: any; + body = buildFormUrlencodedString({ + ...serializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityRequest(input, context), + Action: "ModifyVpcEndpointServicePayerResponsibility", + Version: "2016-11-15", + }); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_ec2ModifyVpcEndpointServicePermissionsCommand = async ( input: ModifyVpcEndpointServicePermissionsCommandInput, context: __SerdeContext @@ -31893,6 +31915,52 @@ const deserializeAws_ec2ModifyVpcEndpointServiceConfigurationCommandError = asyn return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityResult(data, context); + const response: ModifyVpcEndpointServicePayerResponsibilityCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadEc2ErrorCode(output, parsedOutput.body); + switch 
(errorCode) { + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.Errors.Error.code || parsedBody.Errors.Error.Code || errorCode; + response = { + ...parsedBody.Errors.Error, + name: `${errorCode}`, + message: parsedBody.Errors.Error.message || parsedBody.Errors.Error.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_ec2ModifyVpcEndpointServicePermissionsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -37046,6 +37114,9 @@ const serializeAws_ec2CreateCapacityReservationRequest = ( if (input.OutpostArn !== undefined && input.OutpostArn !== null) { entries["OutpostArn"] = input.OutpostArn; } + if (input.PlacementGroupArn !== undefined && input.PlacementGroupArn !== null) { + entries["PlacementGroupArn"] = input.PlacementGroupArn; + } return entries; }; @@ -47166,6 +47237,9 @@ const serializeAws_ec2InstanceMetadataOptionsRequest = ( if (input.HttpProtocolIpv6 !== undefined && input.HttpProtocolIpv6 !== null) { entries["HttpProtocolIpv6"] = input.HttpProtocolIpv6; } + if (input.InstanceMetadataTags !== undefined && input.InstanceMetadataTags !== null) { + entries["InstanceMetadataTags"] = input.InstanceMetadataTags; + } return entries; }; @@ -48236,6 +48310,9 @@ const serializeAws_ec2LaunchTemplateInstanceMetadataOptionsRequest = ( if (input.HttpProtocolIpv6 !== undefined && input.HttpProtocolIpv6 !== null) { entries["HttpProtocolIpv6"] = input.HttpProtocolIpv6; } + if (input.InstanceMetadataTags !== undefined && input.InstanceMetadataTags !== null) { + entries["InstanceMetadataTags"] = input.InstanceMetadataTags; + } return entries; }; @@ -49446,6 +49523,9 @@ const serializeAws_ec2ModifyInstanceMetadataOptionsRequest = ( if (input.HttpProtocolIpv6 !== undefined && input.HttpProtocolIpv6 !== null) { entries["HttpProtocolIpv6"] = input.HttpProtocolIpv6; } + if (input.InstanceMetadataTags !== undefined && input.InstanceMetadataTags !== null) { + entries["InstanceMetadataTags"] = input.InstanceMetadataTags; + } return entries; }; @@ -50394,6 +50474,23 @@ const serializeAws_ec2ModifyVpcEndpointServiceConfigurationRequest = ( return entries; }; +const serializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityRequest = ( + input: ModifyVpcEndpointServicePayerResponsibilityRequest, + context: __SerdeContext +): any => { + const entries: any = {}; + if (input.DryRun !== undefined && input.DryRun !== null) { + entries["DryRun"] = input.DryRun; + } + if (input.ServiceId !== undefined && input.ServiceId !== null) { + entries["ServiceId"] = input.ServiceId; + } + if (input.PayerResponsibility !== undefined && input.PayerResponsibility !== null) { + entries["PayerResponsibility"] = input.PayerResponsibility; + } + return entries; +}; + const serializeAws_ec2ModifyVpcEndpointServicePermissionsRequest = ( input: ModifyVpcEndpointServicePermissionsRequest, context: __SerdeContext @@ -57828,6 +57925,7 @@ const deserializeAws_ec2CapacityReservation = (output: any, context: __SerdeCont Tags: undefined, OutpostArn: undefined, CapacityReservationFleetId: undefined, + PlacementGroupArn: undefined, }; if (output["capacityReservationId"] !== undefined) { contents.CapacityReservationId = __expectString(output["capacityReservationId"]); @@ -57895,6 +57993,9 @@ const 
deserializeAws_ec2CapacityReservation = (output: any, context: __SerdeCont if (output["capacityReservationFleetId"] !== undefined) { contents.CapacityReservationFleetId = __expectString(output["capacityReservationFleetId"]); } + if (output["placementGroupArn"] !== undefined) { + contents.PlacementGroupArn = __expectString(output["placementGroupArn"]); + } return contents; }; @@ -69464,6 +69565,7 @@ const deserializeAws_ec2InstanceMetadataOptionsResponse = ( HttpPutResponseHopLimit: undefined, HttpEndpoint: undefined, HttpProtocolIpv6: undefined, + InstanceMetadataTags: undefined, }; if (output["state"] !== undefined) { contents.State = __expectString(output["state"]); @@ -69480,6 +69582,9 @@ const deserializeAws_ec2InstanceMetadataOptionsResponse = ( if (output["httpProtocolIpv6"] !== undefined) { contents.HttpProtocolIpv6 = __expectString(output["httpProtocolIpv6"]); } + if (output["instanceMetadataTags"] !== undefined) { + contents.InstanceMetadataTags = __expectString(output["instanceMetadataTags"]); + } return contents; }; @@ -71986,6 +72091,7 @@ const deserializeAws_ec2LaunchTemplateInstanceMetadataOptions = ( HttpPutResponseHopLimit: undefined, HttpEndpoint: undefined, HttpProtocolIpv6: undefined, + InstanceMetadataTags: undefined, }; if (output["state"] !== undefined) { contents.State = __expectString(output["state"]); @@ -72002,6 +72108,9 @@ const deserializeAws_ec2LaunchTemplateInstanceMetadataOptions = ( if (output["httpProtocolIpv6"] !== undefined) { contents.HttpProtocolIpv6 = __expectString(output["httpProtocolIpv6"]); } + if (output["instanceMetadataTags"] !== undefined) { + contents.InstanceMetadataTags = __expectString(output["instanceMetadataTags"]); + } return contents; }; @@ -73499,6 +73608,19 @@ const deserializeAws_ec2ModifyVpcEndpointServiceConfigurationResult = ( return contents; }; +const deserializeAws_ec2ModifyVpcEndpointServicePayerResponsibilityResult = ( + output: any, + context: __SerdeContext +): ModifyVpcEndpointServicePayerResponsibilityResult => { + const contents: any = { + ReturnValue: undefined, + }; + if (output["return"] !== undefined) { + contents.ReturnValue = __parseBoolean(output["return"]); + } + return contents; +}; + const deserializeAws_ec2ModifyVpcEndpointServicePermissionsResult = ( output: any, context: __SerdeContext @@ -75183,6 +75305,7 @@ const deserializeAws_ec2PlacementGroup = (output: any, context: __SerdeContext): PartitionCount: undefined, GroupId: undefined, Tags: undefined, + GroupArn: undefined, }; if (output["groupName"] !== undefined) { contents.GroupName = __expectString(output["groupName"]); @@ -75205,6 +75328,9 @@ const deserializeAws_ec2PlacementGroup = (output: any, context: __SerdeContext): if (output["tagSet"] !== undefined && output["tagSet"]["item"] !== undefined) { contents.Tags = deserializeAws_ec2TagList(__getArrayIfSingleItem(output["tagSet"]["item"]), context); } + if (output["groupArn"] !== undefined) { + contents.GroupArn = __expectString(output["groupArn"]); + } return contents; }; @@ -78109,6 +78235,7 @@ const deserializeAws_ec2ServiceConfiguration = (output: any, context: __SerdeCon BaseEndpointDnsNames: undefined, PrivateDnsName: undefined, PrivateDnsNameConfiguration: undefined, + PayerResponsibility: undefined, Tags: undefined, }; if (output.serviceType === "") { @@ -78180,6 +78307,9 @@ const deserializeAws_ec2ServiceConfiguration = (output: any, context: __SerdeCon context ); } + if (output["payerResponsibility"] !== undefined) { + contents.PayerResponsibility = 
__expectString(output["payerResponsibility"]); + } if (output.tagSet === "") { contents.Tags = []; } @@ -78213,6 +78343,7 @@ const deserializeAws_ec2ServiceDetail = (output: any, context: __SerdeContext): VpcEndpointPolicySupported: undefined, AcceptanceRequired: undefined, ManagesVpcEndpoints: undefined, + PayerResponsibility: undefined, Tags: undefined, PrivateDnsNameVerificationState: undefined, }; @@ -78273,6 +78404,9 @@ const deserializeAws_ec2ServiceDetail = (output: any, context: __SerdeContext): if (output["managesVpcEndpoints"] !== undefined) { contents.ManagesVpcEndpoints = __parseBoolean(output["managesVpcEndpoints"]); } + if (output["payerResponsibility"] !== undefined) { + contents.PayerResponsibility = __expectString(output["payerResponsibility"]); + } if (output.tagSet === "") { contents.Tags = []; } diff --git a/clients/client-ecs/src/ECS.ts b/clients/client-ecs/src/ECS.ts index 35c0e0e9e796..e96d9fd05b5b 100644 --- a/clients/client-ecs/src/ECS.ts +++ b/clients/client-ecs/src/ECS.ts @@ -1699,7 +1699,7 @@ export class ECS extends ECSClient { * Amazon Elastic Container Service Developer Guide.

diff --git a/clients/client-ecs/src/ECS.ts b/clients/client-ecs/src/ECS.ts
index 35c0e0e9e796..e96d9fd05b5b 100644
--- a/clients/client-ecs/src/ECS.ts
+++ b/clients/client-ecs/src/ECS.ts
@@ -1699,7 +1699,7 @@ export class ECS extends ECSClient {
    *             Amazon Elastic Container Service Developer Guide.
    *          Alternatively, you can use StartTask to use your own scheduler or
    *             place tasks manually on specific container instances.
-   *          The Amazon ECS API follows an eventual consistency model. This is because the distributed
+   *          The Amazon ECS API follows an eventual consistency model. This is because of the distributed
    * nature of the system supporting the API. This means that the result of an API command
    * you run that affects your Amazon ECS resources might not be immediately visible to all
    * subsequent commands you run. Keep this in mind when you carry out an API command that
diff --git a/clients/client-ecs/src/commands/RunTaskCommand.ts b/clients/client-ecs/src/commands/RunTaskCommand.ts
index 00e86702c4fd..aa0988847375 100644
--- a/clients/client-ecs/src/commands/RunTaskCommand.ts
+++ b/clients/client-ecs/src/commands/RunTaskCommand.ts
@@ -26,7 +26,7 @@ export interface RunTaskCommandOutput extends RunTaskResponse, __MetadataBearer
  *             Amazon Elastic Container Service Developer Guide.
  *          Alternatively, you can use StartTask to use your own scheduler or
  *             place tasks manually on specific container instances.
- *          The Amazon ECS API follows an eventual consistency model. This is because the distributed
+ *          The Amazon ECS API follows an eventual consistency model. This is because of the distributed
  * nature of the system supporting the API. This means that the result of an API command
  * you run that affects your Amazon ECS resources might not be immediately visible to all
  * subsequent commands you run. Keep this in mind when you carry out an API command that
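Because the RunTask documentation above calls out the eventual consistency model, a small consumer-side sketch may help. It assumes the ECS waiter `waitUntilTasksRunning` is available in this client; cluster and task definition names are placeholders:

```ts
import { ECSClient, RunTaskCommand, waitUntilTasksRunning } from "@aws-sdk/client-ecs";

const ecs = new ECSClient({ region: "us-east-1" }); // region value is illustrative

async function runAndWait(): Promise<void> {
  // Start the task; identifiers below are placeholders.
  const { tasks = [] } = await ecs.send(
    new RunTaskCommand({
      cluster: "my-cluster",
      taskDefinition: "my-task:1",
      count: 1,
    })
  );
  const taskArns = tasks.flatMap((t) => (t.taskArn ? [t.taskArn] : []));

  // Because the API is eventually consistent, poll via the waiter rather than assuming
  // the new task is immediately visible to subsequent describe calls.
  await waitUntilTasksRunning(
    { client: ecs, maxWaitTime: 300 },
    { cluster: "my-cluster", tasks: taskArns }
  );
}
```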

diff --git a/clients/client-ecs/src/models/models_0.ts b/clients/client-ecs/src/models/models_0.ts
index 05a53f30a679..6a8c979a09e5 100644
--- a/clients/client-ecs/src/models/models_0.ts
+++ b/clients/client-ecs/src/models/models_0.ts
@@ -128,7 +128,7 @@ export interface AutoScalingGroupProvider {
   /**
    *          The managed termination protection setting to use for the Auto Scaling group capacity
    *             provider. This determines whether the Auto Scaling group has managed termination
-   *             protection.
+   *             protection. The default is disabled.
    *
    *          When using managed termination protection, managed scaling must also be used
    *             otherwise managed termination protection doesn't work.
@@ -845,7 +845,8 @@ export interface Attachment {
   /**
    *          The status of the attachment. Valid values are PRECREATED,
    *             CREATED, ATTACHING, ATTACHED,
-   *             DETACHING, DETACHED, and DELETED.
+   *             DETACHING, DETACHED, DELETED, and
+   *             FAILED.
    */
   status?: string;
@@ -10197,7 +10198,7 @@ export interface UpdateContainerInstancesStateRequest {
   cluster?: string;
 
   /**
-   *          A list of container instance IDs or full ARN entries.
+   *          A list of up to 10 container instance IDs or full ARN entries.
    */
   containerInstances: string[] | undefined;
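The tightened wording above caps `containerInstances` at 10 entries per request. A hedged sketch of how a caller might batch a larger set (the draining use case is an assumption, not part of this diff):

```ts
import { ECSClient, UpdateContainerInstancesStateCommand } from "@aws-sdk/client-ecs";

const ecs = new ECSClient({ region: "us-east-1" }); // region value is illustrative

// The request accepts at most 10 container instances per call (per the doc change above),
// so larger sets are sent in chunks of 10.
async function drainInstances(cluster: string, containerInstanceArns: string[]): Promise<void> {
  for (let i = 0; i < containerInstanceArns.length; i += 10) {
    const batch = containerInstanceArns.slice(i, i + 10);
    await ecs.send(
      new UpdateContainerInstancesStateCommand({
        cluster,
        containerInstances: batch,
        status: "DRAINING",
      })
    );
  }
}
```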

diff --git a/clients/client-eks/README.md b/clients/client-eks/README.md
index d10762cc615d..b2bc5a4fca58 100644
--- a/clients/client-eks/README.md
+++ b/clients/client-eks/README.md
@@ -7,16 +7,16 @@ AWS SDK for JavaScript EKS Client for Node.js, Browser and React Native.
 
-Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on
-Amazon Web Services without needing to stand up or maintain your own Kubernetes control plane.
-Kubernetes is an open-source system for automating the deployment, scaling, and
-management of containerized applications.
-Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use
-all the existing plugins and tooling from the Kubernetes community. Applications running
-on Amazon EKS are fully compatible with applications running on any standard Kubernetes
-environment, whether running in on-premises data centers or public clouds. This means
-that you can easily migrate any standard Kubernetes application to Amazon EKS without any
-code modification required.
+Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy
+for you to run Kubernetes on Amazon Web Services without needing to stand up or maintain
+your own Kubernetes control plane. Kubernetes is an open-source system for automating
+the deployment, scaling, and management of containerized applications.
+Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so
+you can use all the existing plugins and tooling from the Kubernetes community.
+Applications running on Amazon EKS are fully compatible with applications
+running on any standard Kubernetes environment, whether running in on-premises data
+centers or public clouds. This means that you can easily migrate any standard Kubernetes
+application to Amazon EKS without any code modification required.
 
 ## Installing
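For orientation, a minimal usage sketch of the client whose README is being rewrapped here; it assumes the package name `@aws-sdk/client-eks` and default credential resolution, and is not part of the diff itself:

```ts
import { EKSClient, ListClustersCommand } from "@aws-sdk/client-eks";

// List the EKS cluster names in the configured Region; the region value is illustrative.
const eks = new EKSClient({ region: "us-west-2" });

async function listClusterNames(): Promise<string[]> {
  const { clusters = [] } = await eks.send(new ListClustersCommand({}));
  return clusters;
}
```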

diff --git a/clients/client-eks/src/EKS.ts b/clients/client-eks/src/EKS.ts
index 2b83a8bfa9ca..ac04fee2f9df 100644
--- a/clients/client-eks/src/EKS.ts
+++ b/clients/client-eks/src/EKS.ts
@@ -149,16 +149,16 @@ import { EKSClient } from "./EKSClient";
 /**
- *          Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on
- * Amazon Web Services without needing to stand up or maintain your own Kubernetes control plane.
- * Kubernetes is an open-source system for automating the deployment, scaling, and
- * management of containerized applications.
- *          Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use
- * all the existing plugins and tooling from the Kubernetes community. Applications running
- * on Amazon EKS are fully compatible with applications running on any standard Kubernetes
- * environment, whether running in on-premises data centers or public clouds. This means
- * that you can easily migrate any standard Kubernetes application to Amazon EKS without any
- * code modification required.
+ *          Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy
+ * for you to run Kubernetes on Amazon Web Services without needing to stand up or maintain
+ * your own Kubernetes control plane. Kubernetes is an open-source system for automating
+ * the deployment, scaling, and management of containerized applications.
+ *          Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so
+ * you can use all the existing plugins and tooling from the Kubernetes community.
+ * Applications running on Amazon EKS are fully compatible with applications
+ * running on any standard Kubernetes environment, whether running in on-premises data
+ * centers or public clouds. This means that you can easily migrate any standard Kubernetes
+ * application to Amazon EKS without any code modification required.
  */
 export class EKS extends EKSClient {
   /**
@@ -286,7 +286,8 @@
    *

      In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, * you must configure your Kubernetes tooling to communicate with the API server and launch * nodes into your cluster. For more information, see Managing Cluster - * Authentication and Launching Amazon EKS nodes in the Amazon EKS User Guide.

      + * Authentication and Launching Amazon EKS nodes in the + * Amazon EKS User Guide.

      */ public createCluster( args: CreateClusterCommandInput, @@ -318,30 +319,34 @@ export class EKS extends EKSClient { } /** - *

      Creates an Fargate profile for your Amazon EKS cluster. You must have at least one Fargate - * profile in a cluster to be able to run pods on Fargate.

      - *

      The Fargate profile allows an administrator to declare which pods run on Fargate and specify - * which pods run on which Fargate profile. This declaration is done through the profile’s - * selectors. Each profile can have up to five selectors that contain a namespace and - * labels. A namespace is required for every selector. The label field consists of multiple - * optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a - * to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is - * run on Fargate.

      - *

      When you create a Fargate profile, you must specify a pod execution role to use with the - * pods that are scheduled with the profile. This role is added to the cluster's Kubernetes - * Role Based Access - * Control (RBAC) for authorization so that the kubelet that is - * running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can - * appear in your cluster as a node. The pod execution role also provides IAM permissions - * to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more - * information, see Pod Execution Role in the Amazon EKS User Guide.

      - *

      Fargate profiles are immutable. However, you can create a new updated profile to replace - * an existing profile and then delete the original after the updated profile has finished - * creating.

      - *

      If any Fargate profiles in a cluster are in the DELETING status, you must - * wait for that Fargate profile to finish deleting before you can create any other profiles - * in that cluster.

      - *

      For more information, see Fargate Profile in the Amazon EKS User Guide.

      + *

      Creates an Fargate profile for your Amazon EKS cluster. You + * must have at least one Fargate profile in a cluster to be able to run + * pods on Fargate.

      + *

      The Fargate profile allows an administrator to declare which pods run + * on Fargate and specify which pods run on which Fargate + * profile. This declaration is done through the profile’s selectors. Each profile can have + * up to five selectors that contain a namespace and labels. A namespace is required for + * every selector. The label field consists of multiple optional key-value pairs. Pods that + * match the selectors are scheduled on Fargate. If a to-be-scheduled pod + * matches any of the selectors in the Fargate profile, then that pod is run + * on Fargate.

      + *

      When you create a Fargate profile, you must specify a pod execution + * role to use with the pods that are scheduled with the profile. This role is added to the + * cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the + * kubelet that is running on the Fargate infrastructure + * can register with your Amazon EKS cluster so that it can appear in your cluster + * as a node. The pod execution role also provides IAM permissions to the + * Fargate infrastructure to allow read access to Amazon ECR + * image repositories. For more information, see Pod + * Execution Role in the Amazon EKS User Guide.

      + *

      Fargate profiles are immutable. However, you can create a new updated + * profile to replace an existing profile and then delete the original after the updated + * profile has finished creating.

      + *

      If any Fargate profiles in a cluster are in the DELETING + * status, you must wait for that Fargate profile to finish deleting before + * you can create any other profiles in that cluster.

      + *

      For more information, see Fargate Profile in the + * Amazon EKS User Guide.

      */ public createFargateProfile( args: CreateFargateProfileCommandInput, @@ -373,15 +378,16 @@ export class EKS extends EKSClient { } /** - *

      Creates a managed node group for an Amazon EKS cluster. You can only create a node group - * for your cluster that is equal to the current Kubernetes version for the cluster. All - * node groups are created with the latest AMI release version for the respective minor - * Kubernetes version of the cluster, unless you deploy a custom AMI using a launch - * template. For more information about using launch templates, see Launch + *

      Creates a managed node group for an Amazon EKS cluster. You can only create a + * node group for your cluster that is equal to the current Kubernetes version for the + * cluster. All node groups are created with the latest AMI release version for the + * respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using + * a launch template. For more information about using launch templates, see Launch * template support.

      - *

      An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that - * are managed by Amazon Web Services for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS - * optimized Amazon Linux 2 AMI. For more information, see Managed + *

      An Amazon EKS managed node group is an Amazon EC2 + * Auto Scaling group and associated Amazon EC2 instances that are managed by + * Amazon Web Services for an Amazon EKS cluster. Each node group uses a version + * of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see Managed * Node Groups in the Amazon EKS User Guide.

      */ public createNodegroup( @@ -448,8 +454,8 @@ export class EKS extends EKSClient { * are deleted properly. Otherwise, you can have orphaned resources in your VPC that * prevent you from being able to delete the VPC. For more information, see Deleting a * Cluster in the Amazon EKS User Guide.

      - *

      If you have managed node groups or Fargate profiles attached to the cluster, you must - * delete them first. For more information, see DeleteNodegroup and DeleteFargateProfile.

      + *

      If you have managed node groups or Fargate profiles attached to the + * cluster, you must delete them first. For more information, see DeleteNodegroup and DeleteFargateProfile.

      */ public deleteCluster( args: DeleteClusterCommandInput, @@ -482,13 +488,14 @@ export class EKS extends EKSClient { /** *

      Deletes an Fargate profile.

      - *

      When you delete a Fargate profile, any pods running on Fargate that were created with the - * profile are deleted. If those pods match another Fargate profile, then they are scheduled - * on Fargate with that profile. If they no longer match any Fargate profiles, then they are not - * scheduled on Fargate and they may remain in a pending state.

      - *

      Only one Fargate profile in a cluster can be in the DELETING status at a - * time. You must wait for a Fargate profile to finish deleting before you can delete any - * other profiles in that cluster.

      + *

      When you delete a Fargate profile, any pods running on Fargate that were created with the profile are deleted. If those pods match + * another Fargate profile, then they are scheduled on Fargate with that profile. If they no longer match any Fargate profiles, then + * they are not scheduled on Fargate and they may remain in a pending + * state.

      + *

      Only one Fargate profile in a cluster can be in the + * DELETING status at a time. You must wait for a Fargate + * profile to finish deleting before you can delete any other profiles in that + * cluster.

      */ public deleteFargateProfile( args: DeleteFargateProfileCommandInput, @@ -552,7 +559,8 @@ export class EKS extends EKSClient { } /** - *

      Deregisters a connected cluster to remove it from the Amazon EKS control plane.

      + *

      Deregisters a connected cluster to remove it from the Amazon EKS control + * plane.

      */ public deregisterCluster( args: DeregisterClusterCommandInput, @@ -784,8 +792,8 @@ export class EKS extends EKSClient { } /** - *

      Returns descriptive information about an update against your Amazon EKS cluster or - * associated managed node group.

      + *

      Returns descriptive information about an update against your Amazon EKS + * cluster or associated managed node group.

      *

      When the status of the update is Succeeded, the update is complete. If an * update fails, the status is Failed, and an error detail explains the reason * for the failure.

      @@ -822,8 +830,8 @@ export class EKS extends EKSClient { /** *

      Disassociates an identity provider configuration from a cluster. If you disassociate * an identity provider from your cluster, users included in the provider can no longer - * access the cluster. However, you can still access the cluster with Amazon Web Services IAM - * users.

      + * access the cluster. However, you can still access the cluster with Amazon Web Services + * IAM users.

      */ public disassociateIdentityProviderConfig( args: DisassociateIdentityProviderConfigCommandInput, @@ -881,7 +889,8 @@ export class EKS extends EKSClient { } /** - *

      Lists the Amazon EKS clusters in your Amazon Web Services account in the specified Region.

      + *

      Lists the Amazon EKS clusters in your Amazon Web Services account in the + * specified Region.

      */ public listClusters( args: ListClustersCommandInput, @@ -910,8 +919,8 @@ export class EKS extends EKSClient { } /** - *

      Lists the Fargate profiles associated with the specified cluster in your Amazon Web Services - * account in the specified Region.

      + *

      Lists the Fargate profiles associated with the specified cluster in + * your Amazon Web Services account in the specified Region.

      */ public listFargateProfiles( args: ListFargateProfilesCommandInput, @@ -975,8 +984,9 @@ export class EKS extends EKSClient { } /** - *

      Lists the Amazon EKS managed node groups associated with the specified cluster in your - * Amazon Web Services account in the specified Region. Self-managed node groups are not listed.

      + *

      Lists the Amazon EKS managed node groups associated with the specified cluster + * in your Amazon Web Services account in the specified Region. Self-managed node groups are + * not listed.

      */ public listNodegroups( args: ListNodegroupsCommandInput, @@ -1040,8 +1050,8 @@ export class EKS extends EKSClient { } /** - *

      Lists the updates associated with an Amazon EKS cluster or managed node group in your Amazon Web Services - * account, in the specified Region.

      + *

      Lists the updates associated with an Amazon EKS cluster or managed node group + * in your Amazon Web Services account, in the specified Region.

      */ public listUpdates(args: ListUpdatesCommandInput, options?: __HttpHandlerOptions): Promise; public listUpdates(args: ListUpdatesCommandInput, cb: (err: any, data?: ListUpdatesCommandOutput) => void): void; @@ -1068,14 +1078,18 @@ export class EKS extends EKSClient { /** *

   *          Connects a Kubernetes cluster to the Amazon EKS control plane.
-   *          Any Kubernetes cluster can be connected to the Amazon EKS control plane to view current information about the cluster and its nodes.
-   *
+   *          Any Kubernetes cluster can be connected to the Amazon EKS control plane to
+   * view current information about the cluster and its nodes.
   *          Cluster connection requires two steps. First, send a
   *             RegisterClusterRequest
-   *             to add it to the Amazon EKS control plane.
-   *          Second, a Manifest containing the activationID and activationCode must be applied to the Kubernetes cluster through it's native provider to provide visibility.
+   *             to add it to the Amazon EKS
+   * control plane.
+   *          Second, a Manifest containing the activationID and
+   * activationCode must be applied to the Kubernetes cluster through it's
+   * native provider to provide visibility.
   *
-   *          After the Manifest is updated and applied, then the connected cluster is visible to the Amazon EKS control plane. If the Manifest is not applied within three days,
+   *          After the Manifest is updated and applied, then the connected cluster is visible to
+   * the Amazon EKS control plane. If the Manifest is not applied within three days,
   * then the connected cluster will no longer be visible and must be deregistered. See DeregisterCluster.
   */
  public registerCluster(
    args: RegisterClusterCommandInput,
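The two-step flow described in the doc comment above maps onto the SDK roughly as sketched below. This is an assumption-laden example, not part of the diff: the `connectorConfig` field names (`roleArn`, `provider`) and the location of the activation ID/code under `cluster.connectorConfig` should be verified against the generated models.

```ts
import { EKSClient, RegisterClusterCommand } from "@aws-sdk/client-eks";

const eks = new EKSClient({ region: "us-west-2" }); // region value is illustrative

async function connectCluster(): Promise<void> {
  // Step 1: register the external cluster with the Amazon EKS control plane.
  // The cluster name and connector role ARN are placeholders.
  const { cluster } = await eks.send(
    new RegisterClusterCommand({
      name: "my-connected-cluster",
      connectorConfig: {
        roleArn: "arn:aws:iam::123456789012:role/eks-connector-agent",
        provider: "OTHER",
      },
    })
  );

  // Step 2 happens outside the SDK: apply the connector manifest (rendered with the
  // returned activation ID and code) to the cluster through its native tooling within
  // three days, otherwise the registration must be deregistered and repeated.
  console.log(cluster?.connectorConfig?.activationId);
  console.log(cluster?.connectorConfig?.activationCode);
}
```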

@@ -1111,10 +1125,9 @@
   *          Associates the specified tags to a resource with the specified
   *             resourceArn. If existing tags on a resource are not specified in the
   *             request parameters, they are not changed. When a resource is deleted, the tags
-   *             associated with that resource are deleted as well. Tags that you create for Amazon EKS
-   *             resources do not propagate to any other resources associated with the cluster. For
-   *             example, if you tag a cluster with this operation, that tag does not automatically
-   *             propagate to the subnets and nodes associated with the cluster.
+   *             associated with that resource are deleted as well. Tags that you create for Amazon EKS resources do not propagate to any other resources associated with the
+   *             cluster. For example, if you tag a cluster with this operation, that tag does not
+   *             automatically propagate to the subnets and nodes associated with the cluster.
   */
  public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise;
  public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void;
@@ -1198,24 +1211,25 @@
  }

  /**
- *

      Updates an Amazon EKS cluster configuration. Your cluster continues to function during the - * update. The response output includes an update ID that you can use to track the status - * of your cluster update with the DescribeUpdate API operation.

      + *

      Updates an Amazon EKS cluster configuration. Your cluster continues to + * function during the update. The response output includes an update ID that you can use + * to track the status of your cluster update with the DescribeUpdate API + * operation.

      *

      You can use this API operation to enable or disable exporting the Kubernetes control - * plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't - * exported to CloudWatch Logs. For more information, see Amazon EKS - * Cluster Control Plane Logs in the - * + * plane logs for your cluster to CloudWatch Logs. By default, cluster control plane + * logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the + * * Amazon EKS User Guide * .

      * - *

      CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported - * control plane logs. For more information, see CloudWatch Pricing.

      + *

      CloudWatch Logs ingestion, archive storage, and data scanning rates apply to + * exported control plane logs. For more information, see CloudWatch + * Pricing.

      *
      *

      You can also use this API operation to enable or disable public and private access to * your cluster's Kubernetes API server endpoint. By default, public access is enabled, and - * private access is disabled. For more information, see Amazon EKS cluster - * endpoint access control in the + * private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the + * * Amazon EKS User Guide * .

      * @@ -1256,10 +1270,9 @@ export class EKS extends EKSClient { } /** - *

      Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues - * to function during the update. The response output includes an update ID that you can - * use to track the status of your cluster update with the DescribeUpdate - * API operation.

      + *

      Updates an Amazon EKS cluster to the specified Kubernetes version. Your + * cluster continues to function during the update. The response output includes an update + * ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.

      *

      Cluster updates are asynchronous, and they should finish within a few minutes. During * an update, the cluster status moves to UPDATING (this status transition is * eventually consistent). When the update is complete (either Failed or @@ -1298,11 +1311,10 @@ export class EKS extends EKSClient { } /** - *

      Updates an Amazon EKS managed node group configuration. Your node group continues to - * function during the update. The response output includes an update ID that you can use - * to track the status of your node group update with the DescribeUpdate - * API operation. Currently you can update the Kubernetes labels for a node group or the - * scaling configuration.

      + *

      Updates an Amazon EKS managed node group configuration. Your node group + * continues to function during the update. The response output includes an update ID that + * you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes + * labels for a node group or the scaling configuration.

      */ public updateNodegroupConfig( args: UpdateNodegroupConfigCommandInput, @@ -1334,7 +1346,8 @@ export class EKS extends EKSClient { } /** - *

      Updates the Kubernetes version or AMI version of an Amazon EKS managed node group.

      + *

      Updates the Kubernetes version or AMI version of an Amazon EKS managed node + * group.

      *

      You can update a node group using a launch template only if the node group was * originally deployed with a launch template. If you need to update a custom AMI in a node * group that was deployed with a launch template, then update your custom AMI, specify the @@ -1344,14 +1357,14 @@ export class EKS extends EKSClient { * AMI version of a node group's current Kubernetes version by not specifying a Kubernetes * version in the request. You can update to the latest AMI version of your cluster's * current Kubernetes version by specifying your cluster's Kubernetes version in the - * request. For more information, see Amazon EKS - * optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.

      + * request. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.

      *

      You cannot roll back a node group to an earlier Kubernetes version or AMI * version.

      *

      When a node in a managed node group is terminated due to a scaling action or update, - * the pods in that node are drained first. Amazon EKS attempts to drain the nodes gracefully - * and will fail if it is unable to do so. You can force the update if Amazon EKS - * is unable to drain the nodes as a result of a pod disruption budget issue.

      + * the pods in that node are drained first. Amazon EKS attempts to drain the nodes + * gracefully and will fail if it is unable to do so. You can force the update + * if Amazon EKS is unable to drain the nodes as a result of a pod disruption + * budget issue.

      */ public updateNodegroupVersion( args: UpdateNodegroupVersionCommandInput, diff --git a/clients/client-eks/src/EKSClient.ts b/clients/client-eks/src/EKSClient.ts index c39cdcd0c748..9e76b8ec69f5 100644 --- a/clients/client-eks/src/EKSClient.ts +++ b/clients/client-eks/src/EKSClient.ts @@ -351,16 +351,16 @@ type EKSClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOp export interface EKSClientResolvedConfig extends EKSClientResolvedConfigType {} /** - *

      Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on - * Amazon Web Services without needing to stand up or maintain your own Kubernetes control plane. - * Kubernetes is an open-source system for automating the deployment, scaling, and - * management of containerized applications.

      - *

      Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use - * all the existing plugins and tooling from the Kubernetes community. Applications running - * on Amazon EKS are fully compatible with applications running on any standard Kubernetes - * environment, whether running in on-premises data centers or public clouds. This means - * that you can easily migrate any standard Kubernetes application to Amazon EKS without any - * code modification required.

      + *

      Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy + * for you to run Kubernetes on Amazon Web Services without needing to stand up or maintain + * your own Kubernetes control plane. Kubernetes is an open-source system for automating + * the deployment, scaling, and management of containerized applications.

      + *

      Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so + * you can use all the existing plugins and tooling from the Kubernetes community. + * Applications running on Amazon EKS are fully compatible with applications + * running on any standard Kubernetes environment, whether running in on-premises data + * centers or public clouds. This means that you can easily migrate any standard Kubernetes + * application to Amazon EKS without any code modification required.

      */ export class EKSClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-eks/src/commands/CreateClusterCommand.ts b/clients/client-eks/src/commands/CreateClusterCommand.ts index 8ebf5e7272bf..e36023f76b8c 100644 --- a/clients/client-eks/src/commands/CreateClusterCommand.ts +++ b/clients/client-eks/src/commands/CreateClusterCommand.ts @@ -40,7 +40,8 @@ export interface CreateClusterCommandOutput extends CreateClusterResponse, __Met *

      In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster, * you must configure your Kubernetes tooling to communicate with the API server and launch * nodes into your cluster. For more information, see Managing Cluster - * Authentication and Launching Amazon EKS nodes in the Amazon EKS User Guide.

      + * Authentication and Launching Amazon EKS nodes in the + * Amazon EKS User Guide.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/CreateFargateProfileCommand.ts b/clients/client-eks/src/commands/CreateFargateProfileCommand.ts index 0f1f370dc3e0..08242348ced2 100644 --- a/clients/client-eks/src/commands/CreateFargateProfileCommand.ts +++ b/clients/client-eks/src/commands/CreateFargateProfileCommand.ts @@ -22,30 +22,34 @@ export interface CreateFargateProfileCommandInput extends CreateFargateProfileRe export interface CreateFargateProfileCommandOutput extends CreateFargateProfileResponse, __MetadataBearer {} /** - *

      Creates an Fargate profile for your Amazon EKS cluster. You must have at least one Fargate - * profile in a cluster to be able to run pods on Fargate.

      - *

      The Fargate profile allows an administrator to declare which pods run on Fargate and specify - * which pods run on which Fargate profile. This declaration is done through the profile’s - * selectors. Each profile can have up to five selectors that contain a namespace and - * labels. A namespace is required for every selector. The label field consists of multiple - * optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a - * to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is - * run on Fargate.

      - *

      When you create a Fargate profile, you must specify a pod execution role to use with the - * pods that are scheduled with the profile. This role is added to the cluster's Kubernetes - * Role Based Access - * Control (RBAC) for authorization so that the kubelet that is - * running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can - * appear in your cluster as a node. The pod execution role also provides IAM permissions - * to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more - * information, see Pod Execution Role in the Amazon EKS User Guide.

      - *

      Fargate profiles are immutable. However, you can create a new updated profile to replace - * an existing profile and then delete the original after the updated profile has finished - * creating.

      - *

      If any Fargate profiles in a cluster are in the DELETING status, you must - * wait for that Fargate profile to finish deleting before you can create any other profiles - * in that cluster.

      - *

      For more information, see Fargate Profile in the Amazon EKS User Guide.

      + *

      Creates an Fargate profile for your Amazon EKS cluster. You + * must have at least one Fargate profile in a cluster to be able to run + * pods on Fargate.

      + *

      The Fargate profile allows an administrator to declare which pods run + * on Fargate and specify which pods run on which Fargate + * profile. This declaration is done through the profile’s selectors. Each profile can have + * up to five selectors that contain a namespace and labels. A namespace is required for + * every selector. The label field consists of multiple optional key-value pairs. Pods that + * match the selectors are scheduled on Fargate. If a to-be-scheduled pod + * matches any of the selectors in the Fargate profile, then that pod is run + * on Fargate.

      + *

      When you create a Fargate profile, you must specify a pod execution + * role to use with the pods that are scheduled with the profile. This role is added to the + * cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the + * kubelet that is running on the Fargate infrastructure + * can register with your Amazon EKS cluster so that it can appear in your cluster + * as a node. The pod execution role also provides IAM permissions to the + * Fargate infrastructure to allow read access to Amazon ECR + * image repositories. For more information, see Pod + * Execution Role in the Amazon EKS User Guide.

      + *

      Fargate profiles are immutable. However, you can create a new updated + * profile to replace an existing profile and then delete the original after the updated + * profile has finished creating.

      + *

      If any Fargate profiles in a cluster are in the DELETING + * status, you must wait for that Fargate profile to finish deleting before + * you can create any other profiles in that cluster.

      + *

      For more information, see Fargate Profile in the + * Amazon EKS User Guide.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/CreateNodegroupCommand.ts b/clients/client-eks/src/commands/CreateNodegroupCommand.ts index 5b760fe1cc82..3677835871b4 100644 --- a/clients/client-eks/src/commands/CreateNodegroupCommand.ts +++ b/clients/client-eks/src/commands/CreateNodegroupCommand.ts @@ -22,15 +22,16 @@ export interface CreateNodegroupCommandInput extends CreateNodegroupRequest {} export interface CreateNodegroupCommandOutput extends CreateNodegroupResponse, __MetadataBearer {} /** - *

      Creates a managed node group for an Amazon EKS cluster. You can only create a node group - * for your cluster that is equal to the current Kubernetes version for the cluster. All - * node groups are created with the latest AMI release version for the respective minor - * Kubernetes version of the cluster, unless you deploy a custom AMI using a launch - * template. For more information about using launch templates, see Launch + *

      Creates a managed node group for an Amazon EKS cluster. You can only create a + * node group for your cluster that is equal to the current Kubernetes version for the + * cluster. All node groups are created with the latest AMI release version for the + * respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using + * a launch template. For more information about using launch templates, see Launch * template support.

      - *

      An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that - * are managed by Amazon Web Services for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS - * optimized Amazon Linux 2 AMI. For more information, see Managed + *

      An Amazon EKS managed node group is an Amazon EC2 + * Auto Scaling group and associated Amazon EC2 instances that are managed by + * Amazon Web Services for an Amazon EKS cluster. Each node group uses a version + * of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see Managed * Node Groups in the Amazon EKS User Guide.

      * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-eks/src/commands/DeleteClusterCommand.ts b/clients/client-eks/src/commands/DeleteClusterCommand.ts index 28884d4ab2b0..5fe9549b39fb 100644 --- a/clients/client-eks/src/commands/DeleteClusterCommand.ts +++ b/clients/client-eks/src/commands/DeleteClusterCommand.ts @@ -28,8 +28,8 @@ export interface DeleteClusterCommandOutput extends DeleteClusterResponse, __Met * are deleted properly. Otherwise, you can have orphaned resources in your VPC that * prevent you from being able to delete the VPC. For more information, see Deleting a * Cluster in the Amazon EKS User Guide.

      - *

      If you have managed node groups or Fargate profiles attached to the cluster, you must - * delete them first. For more information, see DeleteNodegroup and DeleteFargateProfile.

      + *

      If you have managed node groups or Fargate profiles attached to the + * cluster, you must delete them first. For more information, see DeleteNodegroup and DeleteFargateProfile.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/DeleteFargateProfileCommand.ts b/clients/client-eks/src/commands/DeleteFargateProfileCommand.ts index d062671ee281..8311913d4239 100644 --- a/clients/client-eks/src/commands/DeleteFargateProfileCommand.ts +++ b/clients/client-eks/src/commands/DeleteFargateProfileCommand.ts @@ -23,13 +23,14 @@ export interface DeleteFargateProfileCommandOutput extends DeleteFargateProfileR /** *

      Deletes an Fargate profile.

      - *

      When you delete a Fargate profile, any pods running on Fargate that were created with the - * profile are deleted. If those pods match another Fargate profile, then they are scheduled - * on Fargate with that profile. If they no longer match any Fargate profiles, then they are not - * scheduled on Fargate and they may remain in a pending state.

      - *

      Only one Fargate profile in a cluster can be in the DELETING status at a - * time. You must wait for a Fargate profile to finish deleting before you can delete any - * other profiles in that cluster.

      + *

      When you delete a Fargate profile, any pods running on Fargate that were created with the profile are deleted. If those pods match + * another Fargate profile, then they are scheduled on Fargate with that profile. If they no longer match any Fargate profiles, then + * they are not scheduled on Fargate and they may remain in a pending + * state.

      + *

      Only one Fargate profile in a cluster can be in the + * DELETING status at a time. You must wait for a Fargate + * profile to finish deleting before you can delete any other profiles in that + * cluster.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/DeregisterClusterCommand.ts b/clients/client-eks/src/commands/DeregisterClusterCommand.ts index 1cc725910831..68eb1abfc74c 100644 --- a/clients/client-eks/src/commands/DeregisterClusterCommand.ts +++ b/clients/client-eks/src/commands/DeregisterClusterCommand.ts @@ -22,7 +22,8 @@ export interface DeregisterClusterCommandInput extends DeregisterClusterRequest export interface DeregisterClusterCommandOutput extends DeregisterClusterResponse, __MetadataBearer {} /** - *

      Deregisters a connected cluster to remove it from the Amazon EKS control plane.

      + *

      Deregisters a connected cluster to remove it from the Amazon EKS control + * plane.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/DescribeUpdateCommand.ts b/clients/client-eks/src/commands/DescribeUpdateCommand.ts index a06425e56de0..1143aa3651e3 100644 --- a/clients/client-eks/src/commands/DescribeUpdateCommand.ts +++ b/clients/client-eks/src/commands/DescribeUpdateCommand.ts @@ -22,8 +22,8 @@ export interface DescribeUpdateCommandInput extends DescribeUpdateRequest {} export interface DescribeUpdateCommandOutput extends DescribeUpdateResponse, __MetadataBearer {} /** - *

      Returns descriptive information about an update against your Amazon EKS cluster or - * associated managed node group.

      + *

      Returns descriptive information about an update against your Amazon EKS + * cluster or associated managed node group.

      *

      When the status of the update is Succeeded, the update is complete. If an * update fails, the status is Failed, and an error detail explains the reason * for the failure.

      diff --git a/clients/client-eks/src/commands/DisassociateIdentityProviderConfigCommand.ts b/clients/client-eks/src/commands/DisassociateIdentityProviderConfigCommand.ts index c31ae9cfa5bd..f3aec6aa323e 100644 --- a/clients/client-eks/src/commands/DisassociateIdentityProviderConfigCommand.ts +++ b/clients/client-eks/src/commands/DisassociateIdentityProviderConfigCommand.ts @@ -29,8 +29,8 @@ export interface DisassociateIdentityProviderConfigCommandOutput /** *

      Disassociates an identity provider configuration from a cluster. If you disassociate * an identity provider from your cluster, users included in the provider can no longer - * access the cluster. However, you can still access the cluster with Amazon Web Services IAM - * users.

      + * access the cluster. However, you can still access the cluster with Amazon Web Services + * IAM users.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/ListClustersCommand.ts b/clients/client-eks/src/commands/ListClustersCommand.ts index 9be0a9063909..43022a232b5f 100644 --- a/clients/client-eks/src/commands/ListClustersCommand.ts +++ b/clients/client-eks/src/commands/ListClustersCommand.ts @@ -22,7 +22,8 @@ export interface ListClustersCommandInput extends ListClustersRequest {} export interface ListClustersCommandOutput extends ListClustersResponse, __MetadataBearer {} /** - *

      Lists the Amazon EKS clusters in your Amazon Web Services account in the specified Region.

      + *

      Lists the Amazon EKS clusters in your Amazon Web Services account in the + * specified Region.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/ListFargateProfilesCommand.ts b/clients/client-eks/src/commands/ListFargateProfilesCommand.ts index 42f5e5ccb562..90ddd8ec42c5 100644 --- a/clients/client-eks/src/commands/ListFargateProfilesCommand.ts +++ b/clients/client-eks/src/commands/ListFargateProfilesCommand.ts @@ -22,8 +22,8 @@ export interface ListFargateProfilesCommandInput extends ListFargateProfilesRequ export interface ListFargateProfilesCommandOutput extends ListFargateProfilesResponse, __MetadataBearer {} /** - *

      Lists the Fargate profiles associated with the specified cluster in your Amazon Web Services - * account in the specified Region.

      + *

      Lists the Fargate profiles associated with the specified cluster in + * your Amazon Web Services account in the specified Region.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/ListNodegroupsCommand.ts b/clients/client-eks/src/commands/ListNodegroupsCommand.ts index 30da914a6da8..c8dcd2d3e6be 100644 --- a/clients/client-eks/src/commands/ListNodegroupsCommand.ts +++ b/clients/client-eks/src/commands/ListNodegroupsCommand.ts @@ -22,8 +22,9 @@ export interface ListNodegroupsCommandInput extends ListNodegroupsRequest {} export interface ListNodegroupsCommandOutput extends ListNodegroupsResponse, __MetadataBearer {} /** - *

      Lists the Amazon EKS managed node groups associated with the specified cluster in your - * Amazon Web Services account in the specified Region. Self-managed node groups are not listed.

      + *

      Lists the Amazon EKS managed node groups associated with the specified cluster + * in your Amazon Web Services account in the specified Region. Self-managed node groups are + * not listed.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/ListUpdatesCommand.ts b/clients/client-eks/src/commands/ListUpdatesCommand.ts index c036564be620..486aec87e435 100644 --- a/clients/client-eks/src/commands/ListUpdatesCommand.ts +++ b/clients/client-eks/src/commands/ListUpdatesCommand.ts @@ -22,8 +22,8 @@ export interface ListUpdatesCommandInput extends ListUpdatesRequest {} export interface ListUpdatesCommandOutput extends ListUpdatesResponse, __MetadataBearer {} /** - *

      Lists the updates associated with an Amazon EKS cluster or managed node group in your Amazon Web Services - * account, in the specified Region.

      + *

      Lists the updates associated with an Amazon EKS cluster or managed node group + * in your Amazon Web Services account, in the specified Region.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/RegisterClusterCommand.ts b/clients/client-eks/src/commands/RegisterClusterCommand.ts index 6cfeb6ea2f2f..7a0326299c39 100644 --- a/clients/client-eks/src/commands/RegisterClusterCommand.ts +++ b/clients/client-eks/src/commands/RegisterClusterCommand.ts @@ -23,14 +23,18 @@ export interface RegisterClusterCommandOutput extends RegisterClusterResponse, _ /** *

      Connects a Kubernetes cluster to the Amazon EKS control plane.

      - *

      Any Kubernetes cluster can be connected to the Amazon EKS control plane to view current information about the cluster and its nodes. - *

      + *

      Any Kubernetes cluster can be connected to the Amazon EKS control plane to + * view current information about the cluster and its nodes.

      *

      Cluster connection requires two steps. First, send a * RegisterClusterRequest - * to add it to the Amazon EKS control plane.

      - *

      Second, a Manifest containing the activationID and activationCode must be applied to the Kubernetes cluster through it's native provider to provide visibility.

      + * to add it to the Amazon EKS + * control plane.

      + *

      Second, a Manifest containing the activationID and + * activationCode must be applied to the Kubernetes cluster through it's + * native provider to provide visibility.

      * - *

      After the Manifest is updated and applied, then the connected cluster is visible to the Amazon EKS control plane. If the Manifest is not applied within three days, + *

      After the Manifest is updated and applied, then the connected cluster is visible to + * the Amazon EKS control plane. If the Manifest is not applied within three days, * then the connected cluster will no longer be visible and must be deregistered. See DeregisterCluster.

      * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-eks/src/commands/TagResourceCommand.ts b/clients/client-eks/src/commands/TagResourceCommand.ts index a5ed9c7362f8..d9ae437a23e9 100644 --- a/clients/client-eks/src/commands/TagResourceCommand.ts +++ b/clients/client-eks/src/commands/TagResourceCommand.ts @@ -25,10 +25,9 @@ export interface TagResourceCommandOutput extends TagResourceResponse, __Metadat *

      Associates the specified tags to a resource with the specified * resourceArn. If existing tags on a resource are not specified in the * request parameters, they are not changed. When a resource is deleted, the tags - * associated with that resource are deleted as well. Tags that you create for Amazon EKS - * resources do not propagate to any other resources associated with the cluster. For - * example, if you tag a cluster with this operation, that tag does not automatically - * propagate to the subnets and nodes associated with the cluster.

      + * associated with that resource are deleted as well. Tags that you create for Amazon EKS resources do not propagate to any other resources associated with the + * cluster. For example, if you tag a cluster with this operation, that tag does not + * automatically propagate to the subnets and nodes associated with the cluster.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/UpdateClusterConfigCommand.ts b/clients/client-eks/src/commands/UpdateClusterConfigCommand.ts index 588cd5973037..874383f17981 100644 --- a/clients/client-eks/src/commands/UpdateClusterConfigCommand.ts +++ b/clients/client-eks/src/commands/UpdateClusterConfigCommand.ts @@ -22,24 +22,25 @@ export interface UpdateClusterConfigCommandInput extends UpdateClusterConfigRequ export interface UpdateClusterConfigCommandOutput extends UpdateClusterConfigResponse, __MetadataBearer {} /** - *

      Updates an Amazon EKS cluster configuration. Your cluster continues to function during the - * update. The response output includes an update ID that you can use to track the status - * of your cluster update with the DescribeUpdate API operation.

      + *

      Updates an Amazon EKS cluster configuration. Your cluster continues to + * function during the update. The response output includes an update ID that you can use + * to track the status of your cluster update with the DescribeUpdate API + * operation.

      *

      You can use this API operation to enable or disable exporting the Kubernetes control - * plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't - * exported to CloudWatch Logs. For more information, see Amazon EKS - * Cluster Control Plane Logs in the - * + * plane logs for your cluster to CloudWatch Logs. By default, cluster control plane + * logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the + * * Amazon EKS User Guide * .

      * - *

      CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported - * control plane logs. For more information, see CloudWatch Pricing.

      + *

      CloudWatch Logs ingestion, archive storage, and data scanning rates apply to + * exported control plane logs. For more information, see CloudWatch + * Pricing.

      *
      *

      You can also use this API operation to enable or disable public and private access to * your cluster's Kubernetes API server endpoint. By default, public access is enabled, and - * private access is disabled. For more information, see Amazon EKS cluster - * endpoint access control in the + * private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the + * * Amazon EKS User Guide * .

      * diff --git a/clients/client-eks/src/commands/UpdateClusterVersionCommand.ts b/clients/client-eks/src/commands/UpdateClusterVersionCommand.ts index 7469473281ee..eb0597b5e125 100644 --- a/clients/client-eks/src/commands/UpdateClusterVersionCommand.ts +++ b/clients/client-eks/src/commands/UpdateClusterVersionCommand.ts @@ -22,10 +22,9 @@ export interface UpdateClusterVersionCommandInput extends UpdateClusterVersionRe export interface UpdateClusterVersionCommandOutput extends UpdateClusterVersionResponse, __MetadataBearer {} /** - *

      Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues - * to function during the update. The response output includes an update ID that you can - * use to track the status of your cluster update with the DescribeUpdate - * API operation.

      + *

      Updates an Amazon EKS cluster to the specified Kubernetes version. Your + * cluster continues to function during the update. The response output includes an update + * ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.

      *

      Cluster updates are asynchronous, and they should finish within a few minutes. During * an update, the cluster status moves to UPDATING (this status transition is * eventually consistent). When the update is complete (either Failed or diff --git a/clients/client-eks/src/commands/UpdateNodegroupConfigCommand.ts b/clients/client-eks/src/commands/UpdateNodegroupConfigCommand.ts index 857ed84df93f..2be66eda9e63 100644 --- a/clients/client-eks/src/commands/UpdateNodegroupConfigCommand.ts +++ b/clients/client-eks/src/commands/UpdateNodegroupConfigCommand.ts @@ -22,11 +22,10 @@ export interface UpdateNodegroupConfigCommandInput extends UpdateNodegroupConfig export interface UpdateNodegroupConfigCommandOutput extends UpdateNodegroupConfigResponse, __MetadataBearer {} /** - *

      Updates an Amazon EKS managed node group configuration. Your node group continues to - * function during the update. The response output includes an update ID that you can use - * to track the status of your node group update with the DescribeUpdate - * API operation. Currently you can update the Kubernetes labels for a node group or the - * scaling configuration.

      + *

      Updates an Amazon EKS managed node group configuration. Your node group + * continues to function during the update. The response output includes an update ID that + * you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes + * labels for a node group or the scaling configuration.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/commands/UpdateNodegroupVersionCommand.ts b/clients/client-eks/src/commands/UpdateNodegroupVersionCommand.ts index bfdbde9670f6..bdf41cd7387f 100644 --- a/clients/client-eks/src/commands/UpdateNodegroupVersionCommand.ts +++ b/clients/client-eks/src/commands/UpdateNodegroupVersionCommand.ts @@ -22,7 +22,8 @@ export interface UpdateNodegroupVersionCommandInput extends UpdateNodegroupVersi export interface UpdateNodegroupVersionCommandOutput extends UpdateNodegroupVersionResponse, __MetadataBearer {} /** - *

      Updates the Kubernetes version or AMI version of an Amazon EKS managed node group.

      + *

      Updates the Kubernetes version or AMI version of an Amazon EKS managed node + * group.

      *

      You can update a node group using a launch template only if the node group was * originally deployed with a launch template. If you need to update a custom AMI in a node * group that was deployed with a launch template, then update your custom AMI, specify the @@ -32,14 +33,14 @@ export interface UpdateNodegroupVersionCommandOutput extends UpdateNodegroupVers * AMI version of a node group's current Kubernetes version by not specifying a Kubernetes * version in the request. You can update to the latest AMI version of your cluster's * current Kubernetes version by specifying your cluster's Kubernetes version in the - * request. For more information, see Amazon EKS - * optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.

      + * request. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.

      *

      You cannot roll back a node group to an earlier Kubernetes version or AMI * version.

      *

      When a node in a managed node group is terminated due to a scaling action or update, - * the pods in that node are drained first. Amazon EKS attempts to drain the nodes gracefully - * and will fail if it is unable to do so. You can force the update if Amazon EKS - * is unable to drain the nodes as a result of a pod disruption budget issue.

      + * the pods in that node are drained first. Amazon EKS attempts to drain the nodes + * gracefully and will fail if it is unable to do so. You can force the update + * if Amazon EKS is unable to drain the nodes as a result of a pod disruption + * budget issue.

      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-eks/src/models/models_0.ts b/clients/client-eks/src/models/models_0.ts index f4d0a5b79a7a..78b2ec09a15b 100644 --- a/clients/client-eks/src/models/models_0.ts +++ b/clients/client-eks/src/models/models_0.ts @@ -2,8 +2,8 @@ import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException /** *

      You don't have permissions to perform the requested operation. The user or role that - * is making the request must have at least one IAM permissions policy attached that - * grants the required permissions. For more information, see Access + * is making the request must have at least one IAM permissions policy + * attached that grants the required permissions. For more information, see Access * Management in the IAM User Guide.

      */ export interface AccessDeniedException extends __SmithyException, $MetadataBearer { @@ -135,8 +135,8 @@ export interface Addon { modifiedAt?: Date; /** - *

      The Amazon Resource Name (ARN) of the IAM role that is bound to the Kubernetes service account used - * by the add-on.

      + *

      The Amazon Resource Name (ARN) of the IAM role that is bound to the Kubernetes service + * account used by the add-on.

      */ serviceAccountRoleArn?: string; @@ -255,15 +255,15 @@ export type AMITypes = | "CUSTOM"; /** - *

      Identifies the Key Management Service (KMS) key used to encrypt the secrets.

      + *

      Identifies the Key Management Service (KMS) key used to encrypt the + * secrets.

      */ export interface Provider { /** *

      Amazon Resource Name (ARN) or alias of the KMS key. The KMS key must be symmetric, created in the same * region as the cluster, and if the KMS key was created in a different account, the user * must have access to the KMS key. For more information, see Allowing - * Users in Other Accounts to Use a KMS key in the Key Management Service - * Developer Guide.

      + * Users in Other Accounts to Use a KMS key in the Key Management Service Developer Guide.

      */ keyArn?: string; } @@ -287,7 +287,8 @@ export interface EncryptionConfig { resources?: string[]; /** - *

      Key Management Service (KMS) key. Either the ARN or the alias can be used.

      + *

      Key Management Service (KMS) key. Either the ARN or the alias can be + * used.

      */ provider?: Provider; } @@ -383,7 +384,7 @@ export interface ErrorDetail { *

      * OperationNotPermitted: The service role * associated with the cluster doesn't have the required access permissions for - * Amazon EKS.

      + * Amazon EKS.

      *
    • *
    • *

      @@ -796,8 +797,8 @@ export interface OidcIdentityProviderConfigRequest { /** *

      The key value pairs that describe required claims in the identity token. If set, each * claim is verified to be present in the token with a matching value. For the maximum - * number of claims that you can require, see Amazon EKS service quotas in the - * Amazon EKS User Guide.

      + * number of claims that you can require, see Amazon EKS service + * quotas in the Amazon EKS User Guide.

      */ requiredClaims?: { [key: string]: string }; } @@ -867,11 +868,13 @@ export namespace AssociateIdentityProviderConfigResponse { } /** - *

      An Auto Scaling group that is associated with an Amazon EKS managed node group.

      + *

      An Auto Scaling group that is associated with an Amazon EKS managed node + * group.

      */ export interface AutoScalingGroup { /** - *

      The name of the Auto Scaling group associated with an Amazon EKS managed node group.

      + *

      The name of the Auto Scaling group associated with an Amazon EKS managed node + * group.

      */ name?: string; } @@ -920,8 +923,8 @@ export interface CreateAddonRequest { serviceAccountRoleArn?: string; /** - *

      How to resolve parameter value conflicts when migrating an existing add-on to an Amazon EKS - * add-on.

      + *

      How to resolve parameter value conflicts when migrating an existing add-on to an + * Amazon EKS add-on.

      */ resolveConflicts?: ResolveConflicts | string; @@ -964,16 +967,21 @@ export namespace CreateAddonResponse { }); } +export enum IpFamily { + IPV4 = "ipv4", + IPV6 = "ipv6", +} + /** *

      The Kubernetes network configuration for the cluster.

      */ export interface KubernetesNetworkConfigRequest { /** - *

- *         The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a
- * block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR
- * blocks. We recommend that you specify a block that does not overlap with resources in
- * other networks that are peered or connected to your VPC. The block must meet the
- * following requirements:
+ *         Don't specify a value if you select ipv6 for ipFamily. The CIDR block to assign Kubernetes service IP addresses from.
+ * If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16
+ * or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not
+ * overlap with resources in other networks that are peered or connected to your VPC. The
+ * block must meet the following requirements:

      *
        *
      • *

        Within one of the following private IP address blocks: 10.0.0.0/8, @@ -993,6 +1001,23 @@ export interface KubernetesNetworkConfigRequest { * */ serviceIpv4Cidr?: string; + + /** + *

+ *         Specify which IP version is used to assign Kubernetes Pod and Service IP addresses. If
+ * you don't specify a value, ipv4 is used by default. You can only specify an
+ * IP family when you create a cluster and can't change this value once the cluster is
+ * created. If you specify ipv6, the VPC and subnets that you specify for
+ * cluster creation must have both IPv4 and IPv6 CIDR blocks assigned to them.
+ *         You can only specify ipv6 for 1.21 and later clusters that use version
+ * 1.10.0 or later of the Amazon VPC CNI add-on. If you specify ipv6, then ensure
+ * that your VPC meets the requirements and that you're familiar with the considerations
+ * listed in Assigning
+ * IPv6 addresses to Pods and Services in the Amazon EKS User Guide. If
+ * you specify ipv6, Kubernetes assigns Service and Pod addresses from the
+ * unique local address range (fc00::/7). You can't specify a custom IPv6 CIDR
+ * block.
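To show how the new `ipFamily` setting is intended to be used, here is a hedged sketch of creating an IPv6 cluster. It assumes a client build that already includes this change; the cluster name, role ARN, subnet IDs, and security group ID are placeholders.

```ts
// Sketch only: requires the ipFamily field added in this change. All identifiers are placeholders.
import { EKSClient, CreateClusterCommand } from "@aws-sdk/client-eks";

const client = new EKSClient({ region: "us-west-2" });

async function createIpv6Cluster(): Promise<void> {
  const { cluster } = await client.send(
    new CreateClusterCommand({
      name: "ipv6-cluster",
      version: "1.21",
      roleArn: "arn:aws:iam::111122223333:role/eks-cluster-role",
      resourcesVpcConfig: {
        // Subnets must have both IPv4 and IPv6 CIDR blocks assigned when ipFamily is "ipv6".
        subnetIds: ["subnet-0abc1234", "subnet-0def5678"],
        securityGroupIds: ["sg-0123456789abcdef0"],
      },
      kubernetesNetworkConfig: {
        // Don't set serviceIpv4Cidr when selecting ipv6; addresses come from fc00::/7.
        ipFamily: "ipv6",
      },
    })
  );
  console.log("Cluster status:", cluster?.status);
}

createIpv6Cluster().catch(console.error);
```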

        + */ + ipFamily?: IpFamily | string; } export namespace KubernetesNetworkConfigRequest { @@ -1023,9 +1048,8 @@ export interface LogSetup { types?: (LogType | string)[]; /** - *

        If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs. If a - * log type isn't enabled, that log type doesn't export its control plane logs. Each - * individual log type can be enabled or disabled independently.

        + *

        If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs. If a log type isn't enabled, that log type doesn't export its control + * plane logs. Each individual log type can be enabled or disabled independently.

        */ enabled?: boolean; } @@ -1059,21 +1083,23 @@ export namespace Logging { } /** - *

        An object representing the VPC configuration to use for an Amazon EKS cluster.

        + *

        An object representing the VPC configuration to use for an Amazon EKS + * cluster.

        */ export interface VpcConfigRequest { /** - *

        Specify subnets for your Amazon EKS nodes. Amazon EKS creates cross-account elastic network - * interfaces in these subnets to allow communication between your nodes and the Kubernetes - * control plane.

        + *

        Specify subnets for your Amazon EKS nodes. Amazon EKS creates + * cross-account elastic network interfaces in these subnets to allow communication between + * your nodes and the Kubernetes control plane.

        */ subnetIds?: string[]; /** *

        Specify one or more security groups for the cross-account elastic network interfaces - * that Amazon EKS creates to use that allow communication between your nodes and the Kubernetes - * control plane. If you don't specify any security groups, then familiarize yourself with - * the difference between Amazon EKS defaults for clusters deployed with Kubernetes:

        + * that Amazon EKS creates to use that allow communication between your nodes and + * the Kubernetes control plane. If you don't specify any security groups, then familiarize + * yourself with the difference between Amazon EKS defaults for clusters deployed + * with Kubernetes:

        *
          *
        • *

          1.14 Amazon EKS platform version eks.2 and earlier

          @@ -1094,8 +1120,8 @@ export interface VpcConfigRequest { * Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes * API server can only receive requests from within the cluster VPC. The default value for * this parameter is true, which enables public access for your Kubernetes API - * server. For more information, see Amazon EKS cluster - * endpoint access control in the + * server. For more information, see Amazon EKS cluster endpoint access control in the + * * Amazon EKS User Guide * .

          */ @@ -1106,10 +1132,11 @@ export interface VpcConfigRequest { * Kubernetes API server endpoint. If you enable private access, Kubernetes API requests * from within your cluster's VPC use the private VPC endpoint. The default value for this * parameter is false, which disables private access for your Kubernetes API - * server. If you disable private access and you have nodes or Fargate pods in the - * cluster, then ensure that publicAccessCidrs includes the necessary CIDR - * blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster - * endpoint access control in the + * server. If you disable private access and you have nodes or Fargate + * pods in the cluster, then ensure that publicAccessCidrs includes the + * necessary CIDR blocks for communication with the nodes or Fargate pods. + * For more information, see Amazon EKS cluster endpoint access control in + * the * Amazon EKS User Guide * .

          */ @@ -1119,9 +1146,10 @@ export interface VpcConfigRequest { *

          The CIDR blocks that are allowed access to your cluster's public Kubernetes API server * endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that * you specify is denied. The default value is 0.0.0.0/0. If you've disabled - * private endpoint access and you have nodes or Fargate pods in the cluster, then ensure - * that you specify the necessary CIDR blocks. For more information, see Amazon EKS cluster - * endpoint access control in the + * private endpoint access and you have nodes or Fargate pods in the + * cluster, then ensure that you specify the necessary CIDR blocks. For more information, + * see Amazon EKS cluster endpoint access control in the + * * Amazon EKS User Guide * .
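A short sketch of the endpoint-access settings described above, using placeholder values; the EKS API generally accepts one configuration change (here, the VPC endpoint settings) per UpdateClusterConfig request.

```ts
// Sketch only: restrict public access to the Kubernetes API endpoint to a CIDR allow list.
import { EKSClient, UpdateClusterConfigCommand } from "@aws-sdk/client-eks";

const client = new EKSClient({ region: "us-west-2" });

async function restrictEndpointAccess(): Promise<void> {
  const { update } = await client.send(
    new UpdateClusterConfigCommand({
      name: "my-cluster",
      resourcesVpcConfig: {
        endpointPublicAccess: true,
        endpointPrivateAccess: true,
        // Only these CIDR blocks may reach the public Kubernetes API server endpoint.
        publicAccessCidrs: ["203.0.113.0/24"],
      },
    })
  );
  console.log("Update ID:", update?.id);
}

restrictEndpointAccess().catch(console.error);
```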

          */ @@ -1150,10 +1178,9 @@ export interface CreateClusterRequest { version?: string; /** - *

          The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control - * plane to make calls to Amazon Web Services API operations on your behalf. For more information, see - * Amazon EKS - * Service IAM Role in the + *

          The Amazon Resource Name (ARN) of the IAM role that provides permissions for the + * Kubernetes control plane to make calls to Amazon Web Services API operations on your + * behalf. For more information, see Amazon EKS Service IAM Role in the * Amazon EKS User Guide * .

          */ @@ -1176,14 +1203,15 @@ export interface CreateClusterRequest { /** *

          Enable or disable exporting the Kubernetes control plane logs for your cluster to - * CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more - * information, see Amazon EKS Cluster control plane logs in the + * CloudWatch Logs. By default, cluster control plane logs aren't exported to + * CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the * * Amazon EKS User Guide * .

          * - *

          CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported - * control plane logs. For more information, see CloudWatch Pricing.

          + *

          CloudWatch Logs ingestion, archive storage, and data scanning rates apply to + * exported control plane logs. For more information, see CloudWatch + * Pricing.

          *
          */ logging?: Logging; @@ -1252,8 +1280,8 @@ export interface ConnectorConfigResponse { activationCode?: string; /** - *

          The expiration time of the connected cluster. The cluster's YAML file must be applied through the native - * provider.

          + *

          The expiration time of the connected cluster. The cluster's YAML file must be applied + * through the native provider.

          */ activationExpiry?: Date; @@ -1263,7 +1291,8 @@ export interface ConnectorConfigResponse { provider?: string; /** - *

          The Amazon Resource Name (ARN) of the role to communicate with services from the connected Kubernetes cluster.

          + *

          The Amazon Resource Name (ARN) of the role to communicate with services from the connected Kubernetes + * cluster.

          */ roleArn?: string; } @@ -1318,16 +1347,36 @@ export namespace Identity { } /** - *

          The Kubernetes network configuration for the cluster.

          + *

          The Kubernetes network configuration for the cluster. The response contains a value + * for serviceIpv6Cidr or serviceIpv4Cidr, but not both.

          */ export interface KubernetesNetworkConfigResponse { /** - *

- *          The CIDR block that Kubernetes service IP addresses are assigned from. If you didn't
- * specify a CIDR block when you created the cluster, then Kubernetes assigns addresses
- * from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this was specified, then
- * it was specified when the cluster was created and it cannot be changed.
+ *          The CIDR block that Kubernetes Pod and Service IP addresses are assigned from.
+ * Kubernetes assigns addresses from an IPv4 CIDR block assigned to a subnet that the node
+ * is in. If you didn't specify a CIDR block when you created the cluster, then Kubernetes
+ * assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this
+ * was specified, then it was specified when the cluster was created and it can't be
+ * changed.
   */
  serviceIpv4Cidr?: string;
+
+  /**
+ *          The CIDR block that Kubernetes Pod and Service IP addresses are assigned from if you
+ * created a 1.21 or later cluster with version 1.10.0 or later of the Amazon VPC CNI add-on and
+ * specified ipv6 for ipFamily when you
+ * created the cluster. Kubernetes assigns addresses from the unique local address range
+ * (fc00::/7).
+ */
+  serviceIpv6Cidr?: string;
+
+  /**
+ *          The IP family used to assign Kubernetes Pod and Service IP addresses. The IP family is
+ * always ipv4, unless you have a 1.21 or later cluster running
+ * version 1.10.0 or later of the Amazon VPC CNI add-on and specified ipv6 when you
+ * created the cluster.
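Reading these response fields back is straightforward; the sketch below assumes a client build that includes this change and uses a placeholder cluster name.

```ts
// Sketch only: inspect the network configuration returned by DescribeCluster.
import { EKSClient, DescribeClusterCommand } from "@aws-sdk/client-eks";

const client = new EKSClient({ region: "us-west-2" });

async function printNetworkConfig(): Promise<void> {
  const { cluster } = await client.send(new DescribeClusterCommand({ name: "my-cluster" }));
  const netConfig = cluster?.kubernetesNetworkConfig;
  // The response contains either serviceIpv4Cidr or serviceIpv6Cidr, not both.
  console.log("IP family:", netConfig?.ipFamily);
  console.log("Service CIDR:", netConfig?.serviceIpv6Cidr ?? netConfig?.serviceIpv4Cidr);
}

printNetworkConfig().catch(console.error);
```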

          + */ + ipFamily?: IpFamily | string; } export namespace KubernetesNetworkConfigResponse { @@ -1340,7 +1389,8 @@ export namespace KubernetesNetworkConfigResponse { } /** - *

          An object representing an Amazon EKS cluster VPC configuration response.

          + *

          An object representing an Amazon EKS cluster VPC configuration + * response.

          */ export interface VpcConfigResponse { /** @@ -1356,8 +1406,9 @@ export interface VpcConfigResponse { securityGroupIds?: string[]; /** - *

          The cluster security group that was created by Amazon EKS for the cluster. Managed node - * groups use this security group for control-plane-to-data-plane communication.

          + *

          The cluster security group that was created by Amazon EKS for the cluster. + * Managed node groups use this security group for control-plane-to-data-plane + * communication.

          */ clusterSecurityGroupId?: string; @@ -1367,20 +1418,22 @@ export interface VpcConfigResponse { vpcId?: string; /** - *

          This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If - * the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server - * can only receive requests that originate from within the cluster VPC.

          + *

          This parameter indicates whether the Amazon EKS public API server endpoint is + * enabled. If the Amazon EKS public API server endpoint is disabled, your + * cluster's Kubernetes API server can only receive requests that originate from within the + * cluster VPC.

          */ endpointPublicAccess?: boolean; /** - *

          This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If - * the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate - * from within your cluster's VPC use the private VPC endpoint instead of traversing the - * internet. If this value is disabled and you have nodes or Fargate pods in the cluster, - * then ensure that publicAccessCidrs includes the necessary CIDR blocks for - * communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster - * endpoint access control in the + *

          This parameter indicates whether the Amazon EKS private API server endpoint is + * enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes + * API requests that originate from within your cluster's VPC use the private VPC endpoint + * instead of traversing the internet. If this value is disabled and you have nodes or + * Fargate pods in the cluster, then ensure that + * publicAccessCidrs includes the necessary CIDR blocks for communication + * with the nodes or Fargate pods. For more information, see Amazon EKS cluster endpoint access control in the + * * Amazon EKS User Guide * .

          */ @@ -1390,9 +1443,9 @@ export interface VpcConfigResponse { *

          The CIDR blocks that are allowed access to your cluster's public Kubernetes API server * endpoint. Communication to the endpoint from addresses outside of the listed CIDR blocks * is denied. The default value is 0.0.0.0/0. If you've disabled private - * endpoint access and you have nodes or Fargate pods in the cluster, then ensure that the - * necessary CIDR blocks are listed. For more information, see Amazon EKS cluster - * endpoint access control in the + * endpoint access and you have nodes or Fargate pods in the cluster, + * then ensure that the necessary CIDR blocks are listed. For more information, see Amazon EKS cluster endpoint access control in the + * * Amazon EKS User Guide * .

          */ @@ -1440,16 +1493,17 @@ export interface Cluster { endpoint?: string; /** - *

          The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control - * plane to make calls to Amazon Web Services API operations on your behalf.

          + *

          The Amazon Resource Name (ARN) of the IAM role that provides permissions for the + * Kubernetes control plane to make calls to Amazon Web Services API operations on your + * behalf.

          */ roleArn?: string; /** - *

          The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have - * specific requirements to work properly with Kubernetes. For more information, see Cluster VPC - * Considerations and Cluster Security Group Considerations in the - * Amazon EKS User Guide.

          + *

          The VPC configuration used by the cluster control plane. Amazon EKS VPC + * resources have specific requirements to work properly with Kubernetes. For more + * information, see Cluster VPC Considerations and Cluster Security + * Group Considerations in the Amazon EKS User Guide.

          */ resourcesVpcConfig?: VpcConfigResponse; @@ -1485,8 +1539,8 @@ export interface Cluster { clientRequestToken?: string; /** - *

          The platform version of your Amazon EKS cluster. For more information, see Platform - * Versions in the + *

          The platform version of your Amazon EKS cluster. For more information, see + * Platform Versions in the * Amazon EKS User Guide * .

          */ @@ -1584,8 +1638,9 @@ export namespace ServiceUnavailableException { /** *

          At least one of your specified cluster subnets is in an Availability Zone that does - * not support Amazon EKS. The exception output specifies the supported Availability Zones for - * your account, from which you can choose subnets for your cluster.

          + * not support Amazon EKS. The exception output specifies the supported + * Availability Zones for your account, from which you can choose subnets for your + * cluster.

          */ export interface UnsupportedAvailabilityZoneException extends __SmithyException, $MetadataBearer { name: "UnsupportedAvailabilityZoneException"; @@ -1649,30 +1704,31 @@ export interface CreateFargateProfileRequest { fargateProfileName: string | undefined; /** - *

          The name of the Amazon EKS cluster to apply the Fargate profile to.

          + *

          The name of the Amazon EKS cluster to apply the Fargate profile + * to.

          */ clusterName: string | undefined; /** *

          The Amazon Resource Name (ARN) of the pod execution role to use for pods that match the selectors in - * the Fargate profile. The pod execution role allows Fargate infrastructure to register with - * your cluster as a node, and it provides read access to Amazon ECR image repositories. For - * more information, see Pod Execution Role in the - * Amazon EKS User Guide.

          + * the Fargate profile. The pod execution role allows Fargate + * infrastructure to register with your cluster as a node, and it provides read access to + * Amazon ECR image repositories. For more information, see Pod + * Execution Role in the Amazon EKS User Guide.

          */ podExecutionRoleArn: string | undefined; /** - *

          The IDs of subnets to launch your pods into. At this time, pods running on Fargate are - * not assigned public IP addresses, so only private subnets (with no direct route to an - * Internet Gateway) are accepted for this parameter.

          + *

          The IDs of subnets to launch your pods into. At this time, pods running on Fargate are not assigned public IP addresses, so only private subnets (with + * no direct route to an Internet Gateway) are accepted for this parameter.

          */ subnets?: string[]; /** - *

- *          The selectors to match for pods to use this Fargate profile. Each selector must have an
- * associated namespace. Optionally, you can also specify labels for a namespace. You may
- * specify up to five selectors in a Fargate profile.
+ *          The selectors to match for pods to use this Fargate profile. Each
+ * selector must have an associated namespace. Optionally, you can also specify labels for
+ * a namespace. You may specify up to five selectors in a Fargate
+ * profile.
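A hedged sketch of a CreateFargateProfile call shaped by the fields above; all names, ARNs, and subnet IDs are placeholders.

```ts
// Sketch only: create a Fargate profile with one selector. Placeholder identifiers throughout.
import { EKSClient, CreateFargateProfileCommand } from "@aws-sdk/client-eks";

const client = new EKSClient({ region: "us-west-2" });

async function createProfile(): Promise<void> {
  const { fargateProfile } = await client.send(
    new CreateFargateProfileCommand({
      clusterName: "my-cluster",
      fargateProfileName: "checkout-profile",
      podExecutionRoleArn: "arn:aws:iam::111122223333:role/eks-fargate-pod-execution-role",
      // Private subnets only; Fargate pods aren't assigned public IP addresses.
      subnets: ["subnet-0abc1234", "subnet-0def5678"],
      // Up to five selectors; each needs a namespace, labels are optional.
      selectors: [{ namespace: "checkout", labels: { scheduler: "fargate" } }],
    })
  );
  console.log("Profile status:", fargateProfile?.status);
}

createProfile().catch(console.error);
```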

          */ selectors?: FargateProfileSelector[]; @@ -1717,12 +1773,14 @@ export interface FargateProfile { fargateProfileArn?: string; /** - *

          The name of the Amazon EKS cluster that the Fargate profile belongs to.

          + *

          The name of the Amazon EKS cluster that the Fargate profile + * belongs to.

          */ clusterName?: string; /** - *

          The Unix epoch timestamp in seconds for when the Fargate profile was created.

          + *

          The Unix epoch timestamp in seconds for when the Fargate profile was + * created.

          */ createdAt?: Date; @@ -1799,8 +1857,8 @@ export type CapacityTypes = "ON_DEMAND" | "SPOT"; * , or the node group deployment or * update will fail. For more information about launch templates, see * CreateLaunchTemplate - * in the Amazon EC2 API Reference. - * For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

          + * in the Amazon EC2 API + * Reference. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

          *

          Specify either name or id, but not both.

          */ export interface LaunchTemplateSpecification { @@ -1836,17 +1894,16 @@ export namespace LaunchTemplateSpecification { */ export interface RemoteAccessConfig { /** - *

          The Amazon EC2 SSH key that provides access for SSH communication with the nodes in the - * managed node group. For more information, see Amazon EC2 key - * pairs and Linux instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances.

          + *

          The Amazon EC2 SSH key that provides access for SSH communication with the + * nodes in the managed node group. For more information, see Amazon EC2 key pairs and Linux instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances.

          */ ec2SshKey?: string; /** *

          The security groups that are allowed SSH access (port 22) to the nodes. If you specify - * an Amazon EC2 SSH key but do not specify a source security group when you create a managed - * node group, then port 22 on the nodes is opened to the internet (0.0.0.0/0). For more - * information, see Security Groups for Your VPC in the + * an Amazon EC2 SSH key but do not specify a source security group when you create + * a managed node group, then port 22 on the nodes is opened to the internet (0.0.0.0/0). + * For more information, see Security Groups for Your VPC in the * Amazon Virtual Private Cloud User Guide.

          */ sourceSecurityGroups?: string[]; @@ -1875,14 +1932,13 @@ export interface NodegroupScalingConfig { /** *

          The maximum number of nodes that the managed node group can scale out to. For - * information about the maximum number that you can specify, see Amazon EKS service - * quotas in the Amazon EKS User Guide.

          + * information about the maximum number that you can specify, see Amazon EKS service quotas in the Amazon EKS User Guide.

          */ maxSize?: number; /** *

          The current number of nodes that the managed node group should maintain.

          - * + * *

          If you use Cluster Autoscaler, you shouldn't change the desiredSize value * directly, as this can cause the Cluster Autoscaler to suddenly scale up or scale * down.

          @@ -1954,15 +2010,16 @@ export namespace Taint { */ export interface NodegroupUpdateConfig { /** - *

- *          The maximum number of nodes unavailable at once during a version update. Nodes will be updated in parallel.
- * This value or maxUnavailablePercentage is required to have a value.The maximum number
- * is 100.
+ *          The maximum number of nodes unavailable at once during a version update. Nodes will be
+ * updated in parallel. This value or maxUnavailablePercentage is required to
+ * have a value.The maximum number is 100.
   */
  maxUnavailable?: number;

  /**
- *          The maximum percentage of nodes unavailable during a version update. This percentage of nodes will be
- * updated in parallel, up to 100 nodes at once. This value or maxUnavailable is required to have a value.
+ *          The maximum percentage of nodes unavailable during a version update. This percentage
+ * of nodes will be updated in parallel, up to 100 nodes at once. This value or
+ * maxUnavailable is required to have a value.
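The sketch below shows where these update settings sit in a CreateNodegroup request, alongside a scaling configuration; all identifiers are placeholders.

```ts
// Sketch only: create a managed node group with scaling and update configuration.
import { EKSClient, CreateNodegroupCommand } from "@aws-sdk/client-eks";

const client = new EKSClient({ region: "us-west-2" });

async function createNodegroup(): Promise<void> {
  const { nodegroup } = await client.send(
    new CreateNodegroupCommand({
      clusterName: "my-cluster",
      nodegroupName: "workers",
      nodeRole: "arn:aws:iam::111122223333:role/eks-node-role",
      subnets: ["subnet-0abc1234", "subnet-0def5678"],
      scalingConfig: { minSize: 2, maxSize: 10, desiredSize: 3 },
      // During version updates, take at most 25% of nodes offline at a time.
      // Set either maxUnavailable or maxUnavailablePercentage, not both.
      updateConfig: { maxUnavailablePercentage: 25 },
    })
  );
  console.log("Node group status:", nodegroup?.status);
}

createNodegroup().catch(console.error);
```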

          */ maxUnavailablePercentage?: number; } @@ -2028,9 +2085,10 @@ export interface CreateNodegroupRequest { *

          The AMI type for your node group. GPU instance types should use the * AL2_x86_64_GPU AMI type. Non-GPU instances should use the * AL2_x86_64 AMI type. Arm instances should use the - * AL2_ARM_64 AMI type. All types use the Amazon EKS optimized Amazon Linux 2 AMI. - * If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify amiType, - * or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

          + * AL2_ARM_64 AMI type. All types use the Amazon EKS optimized + * Amazon Linux 2 AMI. If you specify launchTemplate, and your launch template uses a custom AMI, + * then don't specify amiType, or the node group deployment + * will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

          */ amiType?: AMITypes | string; @@ -2042,15 +2100,15 @@ export interface CreateNodegroupRequest { remoteAccess?: RemoteAccessConfig; /** - *

          The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker - * node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive - * permissions for these API calls through an IAM instance profile and associated - * policies. Before you can launch nodes and register them into a cluster, you must create - * an IAM role for those nodes to use when they are launched. For more information, see - * Amazon EKS node IAM role in the + *

          The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The + * Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls + * through an IAM instance profile and associated policies. Before you can + * launch nodes and register them into a cluster, you must create an IAM + * role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the + * * Amazon EKS User Guide - * . - * If you specify launchTemplate, then don't specify + * . If you specify launchTemplate, then don't specify + * * IamInstanceProfile * in your launch template, * or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

          @@ -2109,12 +2167,11 @@ export interface CreateNodegroupRequest { version?: string; /** - *

          The AMI version of the Amazon EKS optimized AMI to use with your node group. By default, - * the latest available AMI version for the node group's current Kubernetes version is - * used. For more information, see Amazon EKS - * optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide. If you specify launchTemplate, - * and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group - * deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

          + *

          The AMI version of the Amazon EKS optimized AMI to use with your node group. + * By default, the latest available AMI version for the node group's current Kubernetes + * version is used. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide. + * If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, + * or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

          */ releaseVersion?: string; } @@ -2158,40 +2215,40 @@ export interface Issue { *
            *
          • *

            - * AccessDenied: Amazon EKS or one or more of your - * managed nodes is failing to authenticate or authorize with your Kubernetes - * cluster API server.

            + * AccessDenied: Amazon EKS or one or + * more of your managed nodes is failing to authenticate or authorize with your + * Kubernetes cluster API server.

            *
          • *
          • *

            - * AsgInstanceLaunchFailures: Your Auto Scaling group is - * experiencing failures while attempting to launch instances.

            + * AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch + * instances.

            *
          • *
          • *

            * AutoScalingGroupNotFound: We couldn't find - * the Auto Scaling group associated with the managed node group. You may be able to - * recreate an Auto Scaling group with the same settings to recover.

            + * the Auto Scaling group associated with the managed node group. You may be + * able to recreate an Auto Scaling group with the same settings to + * recover.

            *
          • *
          • *

            - * ClusterUnreachable: Amazon EKS or one or more of - * your managed nodes is unable to to communicate with your Kubernetes cluster API - * server. This can happen if there are network disruptions or if API servers are - * timing out processing requests.

            + * ClusterUnreachable: Amazon EKS or one + * or more of your managed nodes is unable to to communicate with your Kubernetes + * cluster API server. This can happen if there are network disruptions or if API + * servers are timing out processing requests.

            *
          • *
          • *

            * Ec2LaunchTemplateNotFound: We couldn't find - * the Amazon EC2 launch template for your managed node group. You may be able to - * recreate a launch template with the same settings to recover.

            + * the Amazon EC2 launch template for your managed node group. You may be + * able to recreate a launch template with the same settings to recover.

            *
          • *
          • *

            - * Ec2LaunchTemplateVersionMismatch: The Amazon EC2 - * launch template version for your managed node group does not match the version - * that Amazon EKS created. You may be able to revert to the version that Amazon EKS created - * to recover.

            + * Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not + * match the version that Amazon EKS created. You may be able to revert to + * the version that Amazon EKS created to recover.

            *
          • *
          • *

            @@ -2208,30 +2265,31 @@ export interface Issue { *

          • *

            * Ec2SubnetInvalidConfiguration: One or more - * Amazon EC2 subnets specified for a node group do not automatically assign public IP - * addresses to instances launched into it. If you want your instances to be - * assigned a public IP address, then you need to enable the auto-assign - * public IP address setting for the subnet. See Modifying + * Amazon EC2 subnets specified for a node group do not automatically + * assign public IP addresses to instances launched into it. If you want your + * instances to be assigned a public IP address, then you need to enable the + * auto-assign public IP address setting for the subnet. See + * Modifying * the public IPv4 addressing attribute for your subnet in the Amazon * VPC User Guide.

            *
          • *
          • *

            * IamInstanceProfileNotFound: We couldn't find - * the IAM instance profile for your managed node group. You may be able to - * recreate an instance profile with the same settings to recover.

            + * the IAM instance profile for your managed node group. You may be + * able to recreate an instance profile with the same settings to recover.

            *
          • *
          • *

            * IamNodeRoleNotFound: We couldn't find the - * IAM role for your managed node group. You may be able to recreate an IAM role - * with the same settings to recover.

            + * IAM role for your managed node group. You may be able to + * recreate an IAM role with the same settings to recover.

            *
          • *
          • *

            - * InstanceLimitExceeded: Your Amazon Web Services account is - * unable to launch any more instances of the specified instance type. You may be - * able to request an Amazon EC2 instance limit increase to recover.

            + * InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance + * type. You may be able to request an Amazon EC2 instance limit increase + * to recover.

            *
          • *
          • *

            @@ -2356,8 +2414,8 @@ export interface Nodegroup { /** *

            If the node group was deployed using a launch template with a custom AMI, then this is * the AMI ID that was specified in the launch template. For node groups that weren't - * deployed using a launch template, this is the version of the Amazon EKS optimized AMI that - * the node group was deployed with.

            + * deployed using a launch template, this is the version of the Amazon EKS + * optimized AMI that the node group was deployed with.

            */ releaseVersion?: string; @@ -2417,26 +2475,27 @@ export interface Nodegroup { amiType?: AMITypes | string; /** - *

            The IAM role associated with your node group. The Amazon EKS node kubelet - * daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API - * calls through an IAM instance profile and associated policies.

            + *

            The IAM role associated with your node group. The Amazon EKS + * node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. + * Nodes receive permissions for these API calls through an IAM instance + * profile and associated policies.

            */ nodeRole?: string; /** *

            The Kubernetes labels applied to the nodes in the node group.

            * - *

            Only labels that are applied with the Amazon EKS API are shown here. There may be other - * Kubernetes labels applied to the nodes in this group.

            + *

            Only labels that are applied with the Amazon EKS API are shown here. There + * may be other Kubernetes labels applied to the nodes in this group.

            *
            */ labels?: { [key: string]: string }; /** *

            The Kubernetes taints to be applied to the nodes in the node group when they are - * created. Effect is one of No_Schedule, Prefer_No_Schedule, or No_Execute. Kubernetes taints - * can be used together with tolerations to control how workloads are scheduled to your - * nodes.

            + * created. Effect is one of No_Schedule, Prefer_No_Schedule, or + * No_Execute. Kubernetes taints can be used together with tolerations to + * control how workloads are scheduled to your nodes.

            */ taints?: Taint[]; @@ -2518,7 +2577,8 @@ export interface DeleteAddonRequest { addonName: string | undefined; /** - *

            Specifying this option preserves the add-on software on your cluster but Amazon EKS stops managing any settings for the add-on. If an IAM account is associated with the add-on, it is not removed.

            + *

            Specifying this option preserves the add-on software on your cluster but Amazon EKS stops managing any settings for the add-on. If an IAM + * account is associated with the add-on, it is not removed.

            */ preserve?: boolean; } @@ -2583,7 +2643,8 @@ export namespace DeleteClusterResponse { export interface DeleteFargateProfileRequest { /** - *

            The name of the Amazon EKS cluster associated with the Fargate profile to delete.

            + *

            The name of the Amazon EKS cluster associated with the Fargate + * profile to delete.

            */ clusterName: string | undefined; @@ -2620,7 +2681,8 @@ export namespace DeleteFargateProfileResponse { export interface DeleteNodegroupRequest { /** - *

            The name of the Amazon EKS cluster that is associated with your node group.

            + *

            The name of the Amazon EKS cluster that is associated with your node + * group.

            */ clusterName: string | undefined; @@ -2829,7 +2891,8 @@ export namespace DescribeClusterResponse { export interface DescribeFargateProfileRequest { /** - *

            The name of the Amazon EKS cluster associated with the Fargate profile.

            + *

            The name of the Amazon EKS cluster associated with the Fargate + * profile.

            */ clusterName: string | undefined; @@ -3339,7 +3402,8 @@ export namespace ListFargateProfilesRequest { export interface ListFargateProfilesResponse { /** - *

            A list of all of the Fargate profiles associated with the specified cluster.

            + *

            A list of all of the Fargate profiles associated with the specified + * cluster.

            */ fargateProfileNames?: string[]; @@ -3426,7 +3490,8 @@ export namespace ListIdentityProviderConfigsResponse { export interface ListNodegroupsRequest { /** - *

            The name of the Amazon EKS cluster that you would like to list node groups in.

            + *

            The name of the Amazon EKS cluster that you would like to list node groups + * in.

            */ clusterName: string | undefined; @@ -3644,7 +3709,8 @@ export enum ConnectorConfigProvider { */ export interface ConnectorConfigRequest { /** - *

            The Amazon Resource Name (ARN) of the role that is authorized to request the connector configuration.

            + *

            The Amazon Resource Name (ARN) of the role that is authorized to request the connector + * configuration.

            */ roleArn: string | undefined; @@ -3675,7 +3741,8 @@ export interface RegisterClusterRequest { connectorConfig: ConnectorConfigRequest | undefined; /** - *

            Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

            + *

            Unique, case-sensitive identifier that you provide to ensure the idempotency of the + * request.

            */ clientRequestToken?: string; @@ -3874,20 +3941,22 @@ export interface UpdateClusterConfigRequest { name: string | undefined; /** - *

            An object representing the VPC configuration to use for an Amazon EKS cluster.

            + *

            An object representing the VPC configuration to use for an Amazon EKS + * cluster.

            */ resourcesVpcConfig?: VpcConfigRequest; /** *

            Enable or disable exporting the Kubernetes control plane logs for your cluster to - * CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more - * information, see Amazon EKS cluster control plane logs in the + * CloudWatch Logs. By default, cluster control plane logs aren't exported to + * CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the * * Amazon EKS User Guide * .

            * - *

            CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported - * control plane logs. For more information, see CloudWatch Pricing.

            + *

            CloudWatch Logs ingestion, archive storage, and data scanning rates apply to + * exported control plane logs. For more information, see CloudWatch + * Pricing.
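A minimal sketch of enabling log export through this request shape; the cluster name and the selection of log types are placeholders.

```ts
// Sketch only: export selected control plane log types to CloudWatch Logs.
import { EKSClient, UpdateClusterConfigCommand } from "@aws-sdk/client-eks";

const client = new EKSClient({ region: "us-west-2" });

async function enableControlPlaneLogs(): Promise<void> {
  const { update } = await client.send(
    new UpdateClusterConfigCommand({
      name: "my-cluster",
      logging: {
        clusterLogging: [
          { types: ["api", "audit", "authenticator"], enabled: true },
          { types: ["controllerManager", "scheduler"], enabled: false },
        ],
      },
    })
  );
  console.log("Update type:", update?.type);
}

enableControlPlaneLogs().catch(console.error);
```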

            *
            */ logging?: Logging; @@ -4017,7 +4086,8 @@ export namespace UpdateTaintsPayload { export interface UpdateNodegroupConfigRequest { /** - *

            The name of the Amazon EKS cluster that the managed node group resides in.

            + *

            The name of the Amazon EKS cluster that the managed node group resides + * in.

            */ clusterName: string | undefined; @@ -4082,8 +4152,8 @@ export namespace UpdateNodegroupConfigResponse { export interface UpdateNodegroupVersionRequest { /** - *

            The name of the Amazon EKS cluster that is associated with the managed node group to - * update.

            + *

            The name of the Amazon EKS cluster that is associated with the managed node + * group to update.

            */ clusterName: string | undefined; @@ -4103,12 +4173,11 @@ export interface UpdateNodegroupVersionRequest { version?: string; /** - *

            The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the - * latest available AMI version for the node group's Kubernetes version is used. For more - * information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the - * Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify - * releaseVersion, or the node group update will fail. - * For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

            + *

            The AMI version of the Amazon EKS optimized AMI to use for the update. By + * default, the latest available AMI version for the node group's Kubernetes version is + * used. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide. + * If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion, + * or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

            */ releaseVersion?: string; diff --git a/clients/client-eks/src/protocols/Aws_restJson1.ts b/clients/client-eks/src/protocols/Aws_restJson1.ts index 349a12e474eb..10e255fb8108 100644 --- a/clients/client-eks/src/protocols/Aws_restJson1.ts +++ b/clients/client-eks/src/protocols/Aws_restJson1.ts @@ -4843,6 +4843,7 @@ const serializeAws_restJson1KubernetesNetworkConfigRequest = ( context: __SerdeContext ): any => { return { + ...(input.ipFamily !== undefined && input.ipFamily !== null && { ipFamily: input.ipFamily }), ...(input.serviceIpv4Cidr !== undefined && input.serviceIpv4Cidr !== null && { serviceIpv4Cidr: input.serviceIpv4Cidr }), }; @@ -5473,7 +5474,9 @@ const deserializeAws_restJson1KubernetesNetworkConfigResponse = ( context: __SerdeContext ): KubernetesNetworkConfigResponse => { return { + ipFamily: __expectString(output.ipFamily), serviceIpv4Cidr: __expectString(output.serviceIpv4Cidr), + serviceIpv6Cidr: __expectString(output.serviceIpv6Cidr), } as any; }; diff --git a/clients/client-elasticsearch-service/src/models/models_0.ts b/clients/client-elasticsearch-service/src/models/models_0.ts index 8eee6c3cd4dd..4d4baab7295b 100644 --- a/clients/client-elasticsearch-service/src/models/models_0.ts +++ b/clients/client-elasticsearch-service/src/models/models_0.ts @@ -567,6 +567,16 @@ export interface AdvancedSecurityOptions { *

            Describes the SAML application configured for a domain.

            */ SAMLOptions?: SAMLOptionsOutput; + + /** + *

            Specifies the Anonymous Auth Disable Date when Anonymous Auth is enabled.

            + */ + AnonymousAuthDisableDate?: Date; + + /** + *

            True if Anonymous auth is enabled. Anonymous auth can be enabled only when AdvancedSecurity is enabled on existing domains.

            + */ + AnonymousAuthEnabled?: boolean; } export namespace AdvancedSecurityOptions { @@ -682,6 +692,11 @@ export interface AdvancedSecurityOptionsInput { *

            Specifies the SAML application configuration for the domain.

            */ SAMLOptions?: SAMLOptionsInput; + + /** + *

            True if Anonymous auth is enabled. Anonymous auth can be enabled only when AdvancedSecurity is enabled on existing domains.
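A hedged sketch of how the new AnonymousAuthEnabled input could be used when enabling fine-grained access control on an existing domain; the domain name and master-user values are placeholders, and reading the disable date back assumes the usual Options/Status wrapper on the domain config response.

```ts
// Sketch only: enable advanced security on an existing domain while keeping anonymous
// auth available during migration. Requires a client build that includes this change.
import {
  ElasticsearchServiceClient,
  UpdateElasticsearchDomainConfigCommand,
} from "@aws-sdk/client-elasticsearch-service";

const client = new ElasticsearchServiceClient({ region: "us-east-1" });

async function enableAdvancedSecurityWithAnonymousAuth(): Promise<void> {
  const { DomainConfig } = await client.send(
    new UpdateElasticsearchDomainConfigCommand({
      DomainName: "my-domain",
      AdvancedSecurityOptions: {
        Enabled: true,
        InternalUserDatabaseEnabled: true,
        MasterUserOptions: { MasterUserName: "admin", MasterUserPassword: "REPLACE_ME" },
        // Keeps unauthenticated access working while clients migrate to signed requests.
        AnonymousAuthEnabled: true,
      },
    })
  );
  console.log(DomainConfig?.AdvancedSecurityOptions?.Options?.AnonymousAuthDisableDate);
}

enableAdvancedSecurityWithAnonymousAuth().catch(console.error);
```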

            + */ + AnonymousAuthEnabled?: boolean; } export namespace AdvancedSecurityOptionsInput { diff --git a/clients/client-elasticsearch-service/src/protocols/Aws_restJson1.ts b/clients/client-elasticsearch-service/src/protocols/Aws_restJson1.ts index 937a4564caf8..4fa51aad6b35 100644 --- a/clients/client-elasticsearch-service/src/protocols/Aws_restJson1.ts +++ b/clients/client-elasticsearch-service/src/protocols/Aws_restJson1.ts @@ -5148,6 +5148,8 @@ const serializeAws_restJson1AdvancedSecurityOptionsInput = ( context: __SerdeContext ): any => { return { + ...(input.AnonymousAuthEnabled !== undefined && + input.AnonymousAuthEnabled !== null && { AnonymousAuthEnabled: input.AnonymousAuthEnabled }), ...(input.Enabled !== undefined && input.Enabled !== null && { Enabled: input.Enabled }), ...(input.InternalUserDatabaseEnabled !== undefined && input.InternalUserDatabaseEnabled !== null && { InternalUserDatabaseEnabled: input.InternalUserDatabaseEnabled }), @@ -5574,6 +5576,11 @@ const deserializeAws_restJson1AdvancedSecurityOptions = ( context: __SerdeContext ): AdvancedSecurityOptions => { return { + AnonymousAuthDisableDate: + output.AnonymousAuthDisableDate !== undefined && output.AnonymousAuthDisableDate !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.AnonymousAuthDisableDate))) + : undefined, + AnonymousAuthEnabled: __expectBoolean(output.AnonymousAuthEnabled), Enabled: __expectBoolean(output.Enabled), InternalUserDatabaseEnabled: __expectBoolean(output.InternalUserDatabaseEnabled), SAMLOptions: diff --git a/clients/client-glue/src/Glue.ts b/clients/client-glue/src/Glue.ts index 9cbc64bc099b..b83cb51304da 100644 --- a/clients/client-glue/src/Glue.ts +++ b/clients/client-glue/src/Glue.ts @@ -445,6 +445,21 @@ import { import { GetTagsCommand, GetTagsCommandInput, GetTagsCommandOutput } from "./commands/GetTagsCommand"; import { GetTriggerCommand, GetTriggerCommandInput, GetTriggerCommandOutput } from "./commands/GetTriggerCommand"; import { GetTriggersCommand, GetTriggersCommandInput, GetTriggersCommandOutput } from "./commands/GetTriggersCommand"; +import { + GetUnfilteredPartitionMetadataCommand, + GetUnfilteredPartitionMetadataCommandInput, + GetUnfilteredPartitionMetadataCommandOutput, +} from "./commands/GetUnfilteredPartitionMetadataCommand"; +import { + GetUnfilteredPartitionsMetadataCommand, + GetUnfilteredPartitionsMetadataCommandInput, + GetUnfilteredPartitionsMetadataCommandOutput, +} from "./commands/GetUnfilteredPartitionsMetadataCommand"; +import { + GetUnfilteredTableMetadataCommand, + GetUnfilteredTableMetadataCommandInput, + GetUnfilteredTableMetadataCommandOutput, +} from "./commands/GetUnfilteredTableMetadataCommand"; import { GetUserDefinedFunctionCommand, GetUserDefinedFunctionCommandInput, @@ -4050,6 +4065,93 @@ export class Glue extends GlueClient { } } + public getUnfilteredPartitionMetadata( + args: GetUnfilteredPartitionMetadataCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getUnfilteredPartitionMetadata( + args: GetUnfilteredPartitionMetadataCommandInput, + cb: (err: any, data?: GetUnfilteredPartitionMetadataCommandOutput) => void + ): void; + public getUnfilteredPartitionMetadata( + args: GetUnfilteredPartitionMetadataCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetUnfilteredPartitionMetadataCommandOutput) => void + ): void; + public getUnfilteredPartitionMetadata( + args: GetUnfilteredPartitionMetadataCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: 
GetUnfilteredPartitionMetadataCommandOutput) => void), + cb?: (err: any, data?: GetUnfilteredPartitionMetadataCommandOutput) => void + ): Promise | void { + const command = new GetUnfilteredPartitionMetadataCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + public getUnfilteredPartitionsMetadata( + args: GetUnfilteredPartitionsMetadataCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getUnfilteredPartitionsMetadata( + args: GetUnfilteredPartitionsMetadataCommandInput, + cb: (err: any, data?: GetUnfilteredPartitionsMetadataCommandOutput) => void + ): void; + public getUnfilteredPartitionsMetadata( + args: GetUnfilteredPartitionsMetadataCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetUnfilteredPartitionsMetadataCommandOutput) => void + ): void; + public getUnfilteredPartitionsMetadata( + args: GetUnfilteredPartitionsMetadataCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetUnfilteredPartitionsMetadataCommandOutput) => void), + cb?: (err: any, data?: GetUnfilteredPartitionsMetadataCommandOutput) => void + ): Promise | void { + const command = new GetUnfilteredPartitionsMetadataCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + public getUnfilteredTableMetadata( + args: GetUnfilteredTableMetadataCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getUnfilteredTableMetadata( + args: GetUnfilteredTableMetadataCommandInput, + cb: (err: any, data?: GetUnfilteredTableMetadataCommandOutput) => void + ): void; + public getUnfilteredTableMetadata( + args: GetUnfilteredTableMetadataCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetUnfilteredTableMetadataCommandOutput) => void + ): void; + public getUnfilteredTableMetadata( + args: GetUnfilteredTableMetadataCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetUnfilteredTableMetadataCommandOutput) => void), + cb?: (err: any, data?: GetUnfilteredTableMetadataCommandOutput) => void + ): Promise | void { + const command = new GetUnfilteredTableMetadataCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *
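A hedged sketch of calling one of the new aggregated-client methods added above. This part of the diff shows only the method wiring, not the request shape, so the field names used below (CatalogId, DatabaseName, Name, SupportedPermissionTypes) are assumptions based on the service API, and all identifiers are placeholders.

```ts
// Sketch only: the promise-returning overload of the new Glue method; request fields are assumed.
import { Glue } from "@aws-sdk/client-glue";

const glue = new Glue({ region: "us-east-1" });

async function fetchFilteredTable(): Promise<void> {
  const response = await glue.getUnfilteredTableMetadata({
    CatalogId: "123456789012", // placeholder catalog (account) ID
    DatabaseName: "sales", // placeholder database
    Name: "orders", // placeholder table
    SupportedPermissionTypes: ["COLUMN_PERMISSION"],
  });
  console.log(JSON.stringify(response, null, 2));
}

fetchFilteredTable().catch(console.error);
```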

            Retrieves a specified function definition from the Data Catalog.

            */ diff --git a/clients/client-glue/src/GlueClient.ts b/clients/client-glue/src/GlueClient.ts index 9dfdef316a4e..fce78e0f4509 100644 --- a/clients/client-glue/src/GlueClient.ts +++ b/clients/client-glue/src/GlueClient.ts @@ -235,6 +235,18 @@ import { GetTableVersionsCommandInput, GetTableVersionsCommandOutput } from "./c import { GetTagsCommandInput, GetTagsCommandOutput } from "./commands/GetTagsCommand"; import { GetTriggerCommandInput, GetTriggerCommandOutput } from "./commands/GetTriggerCommand"; import { GetTriggersCommandInput, GetTriggersCommandOutput } from "./commands/GetTriggersCommand"; +import { + GetUnfilteredPartitionMetadataCommandInput, + GetUnfilteredPartitionMetadataCommandOutput, +} from "./commands/GetUnfilteredPartitionMetadataCommand"; +import { + GetUnfilteredPartitionsMetadataCommandInput, + GetUnfilteredPartitionsMetadataCommandOutput, +} from "./commands/GetUnfilteredPartitionsMetadataCommand"; +import { + GetUnfilteredTableMetadataCommandInput, + GetUnfilteredTableMetadataCommandOutput, +} from "./commands/GetUnfilteredTableMetadataCommand"; import { GetUserDefinedFunctionCommandInput, GetUserDefinedFunctionCommandOutput, @@ -464,6 +476,9 @@ export type ServiceInputTypes = | GetTagsCommandInput | GetTriggerCommandInput | GetTriggersCommandInput + | GetUnfilteredPartitionMetadataCommandInput + | GetUnfilteredPartitionsMetadataCommandInput + | GetUnfilteredTableMetadataCommandInput | GetUserDefinedFunctionCommandInput | GetUserDefinedFunctionsCommandInput | GetWorkflowCommandInput @@ -632,6 +647,9 @@ export type ServiceOutputTypes = | GetTagsCommandOutput | GetTriggerCommandOutput | GetTriggersCommandOutput + | GetUnfilteredPartitionMetadataCommandOutput + | GetUnfilteredPartitionsMetadataCommandOutput + | GetUnfilteredTableMetadataCommandOutput | GetUserDefinedFunctionCommandOutput | GetUserDefinedFunctionsCommandOutput | GetWorkflowCommandOutput diff --git a/clients/client-glue/src/commands/GetCrawlerMetricsCommand.ts b/clients/client-glue/src/commands/GetCrawlerMetricsCommand.ts index 9f4ff8eb7d08..8a84e8b9dc22 100644 --- a/clients/client-glue/src/commands/GetCrawlerMetricsCommand.ts +++ b/clients/client-glue/src/commands/GetCrawlerMetricsCommand.ts @@ -12,7 +12,8 @@ import { } from "@aws-sdk/types"; import { GlueClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GlueClient"; -import { GetCrawlerMetricsRequest, GetCrawlerMetricsResponse } from "../models/models_0"; +import { GetCrawlerMetricsRequest } from "../models/models_0"; +import { GetCrawlerMetricsResponse } from "../models/models_1"; import { deserializeAws_json1_1GetCrawlerMetricsCommand, serializeAws_json1_1GetCrawlerMetricsCommand, diff --git a/clients/client-glue/src/commands/GetCrawlersCommand.ts b/clients/client-glue/src/commands/GetCrawlersCommand.ts index 09234be6116c..90316885901b 100644 --- a/clients/client-glue/src/commands/GetCrawlersCommand.ts +++ b/clients/client-glue/src/commands/GetCrawlersCommand.ts @@ -12,7 +12,7 @@ import { } from "@aws-sdk/types"; import { GlueClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GlueClient"; -import { GetCrawlersRequest, GetCrawlersResponse } from "../models/models_0"; +import { GetCrawlersRequest, GetCrawlersResponse } from "../models/models_1"; import { deserializeAws_json1_1GetCrawlersCommand, serializeAws_json1_1GetCrawlersCommand, diff --git a/clients/client-glue/src/commands/GetUnfilteredPartitionMetadataCommand.ts b/clients/client-glue/src/commands/GetUnfilteredPartitionMetadataCommand.ts new 
file mode 100644 index 000000000000..f1e567c0b034 --- /dev/null +++ b/clients/client-glue/src/commands/GetUnfilteredPartitionMetadataCommand.ts @@ -0,0 +1,86 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GlueClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GlueClient"; +import { GetUnfilteredPartitionMetadataRequest, GetUnfilteredPartitionMetadataResponse } from "../models/models_1"; +import { + deserializeAws_json1_1GetUnfilteredPartitionMetadataCommand, + serializeAws_json1_1GetUnfilteredPartitionMetadataCommand, +} from "../protocols/Aws_json1_1"; + +export interface GetUnfilteredPartitionMetadataCommandInput extends GetUnfilteredPartitionMetadataRequest {} +export interface GetUnfilteredPartitionMetadataCommandOutput + extends GetUnfilteredPartitionMetadataResponse, + __MetadataBearer {} + +export class GetUnfilteredPartitionMetadataCommand extends $Command< + GetUnfilteredPartitionMetadataCommandInput, + GetUnfilteredPartitionMetadataCommandOutput, + GlueClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetUnfilteredPartitionMetadataCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: GlueClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "GlueClient"; + const commandName = "GetUnfilteredPartitionMetadataCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetUnfilteredPartitionMetadataRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetUnfilteredPartitionMetadataResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: GetUnfilteredPartitionMetadataCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1GetUnfilteredPartitionMetadataCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1GetUnfilteredPartitionMetadataCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-glue/src/commands/GetUnfilteredPartitionsMetadataCommand.ts b/clients/client-glue/src/commands/GetUnfilteredPartitionsMetadataCommand.ts new file mode 100644 index 000000000000..b762648d7299 --- /dev/null +++ b/clients/client-glue/src/commands/GetUnfilteredPartitionsMetadataCommand.ts @@ -0,0 +1,86 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { 
HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GlueClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GlueClient"; +import { GetUnfilteredPartitionsMetadataRequest, GetUnfilteredPartitionsMetadataResponse } from "../models/models_1"; +import { + deserializeAws_json1_1GetUnfilteredPartitionsMetadataCommand, + serializeAws_json1_1GetUnfilteredPartitionsMetadataCommand, +} from "../protocols/Aws_json1_1"; + +export interface GetUnfilteredPartitionsMetadataCommandInput extends GetUnfilteredPartitionsMetadataRequest {} +export interface GetUnfilteredPartitionsMetadataCommandOutput + extends GetUnfilteredPartitionsMetadataResponse, + __MetadataBearer {} + +export class GetUnfilteredPartitionsMetadataCommand extends $Command< + GetUnfilteredPartitionsMetadataCommandInput, + GetUnfilteredPartitionsMetadataCommandOutput, + GlueClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetUnfilteredPartitionsMetadataCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: GlueClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "GlueClient"; + const commandName = "GetUnfilteredPartitionsMetadataCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetUnfilteredPartitionsMetadataRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetUnfilteredPartitionsMetadataResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: GetUnfilteredPartitionsMetadataCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1GetUnfilteredPartitionsMetadataCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1GetUnfilteredPartitionsMetadataCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-glue/src/commands/GetUnfilteredTableMetadataCommand.ts b/clients/client-glue/src/commands/GetUnfilteredTableMetadataCommand.ts new file mode 100644 index 000000000000..7f27d6ba3bc5 --- /dev/null +++ b/clients/client-glue/src/commands/GetUnfilteredTableMetadataCommand.ts @@ -0,0 +1,81 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + 
HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GlueClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GlueClient"; +import { GetUnfilteredTableMetadataRequest, GetUnfilteredTableMetadataResponse } from "../models/models_1"; +import { + deserializeAws_json1_1GetUnfilteredTableMetadataCommand, + serializeAws_json1_1GetUnfilteredTableMetadataCommand, +} from "../protocols/Aws_json1_1"; + +export interface GetUnfilteredTableMetadataCommandInput extends GetUnfilteredTableMetadataRequest {} +export interface GetUnfilteredTableMetadataCommandOutput extends GetUnfilteredTableMetadataResponse, __MetadataBearer {} + +export class GetUnfilteredTableMetadataCommand extends $Command< + GetUnfilteredTableMetadataCommandInput, + GetUnfilteredTableMetadataCommandOutput, + GlueClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetUnfilteredTableMetadataCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: GlueClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "GlueClient"; + const commandName = "GetUnfilteredTableMetadataCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetUnfilteredTableMetadataRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetUnfilteredTableMetadataResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetUnfilteredTableMetadataCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1GetUnfilteredTableMetadataCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1GetUnfilteredTableMetadataCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-glue/src/commands/index.ts b/clients/client-glue/src/commands/index.ts index 7e4a5a571a94..30b5873d52c6 100644 --- a/clients/client-glue/src/commands/index.ts +++ b/clients/client-glue/src/commands/index.ts @@ -103,6 +103,9 @@ export * from "./GetTablesCommand"; export * from "./GetTagsCommand"; export * from "./GetTriggerCommand"; export * from "./GetTriggersCommand"; +export * from "./GetUnfilteredPartitionMetadataCommand"; +export * from "./GetUnfilteredPartitionsMetadataCommand"; +export * from "./GetUnfilteredTableMetadataCommand"; export * from "./GetUserDefinedFunctionCommand"; export * from "./GetUserDefinedFunctionsCommand"; export * from "./GetWorkflowCommand"; diff --git a/clients/client-glue/src/models/models_0.ts b/clients/client-glue/src/models/models_0.ts index b6605d099bab..18e7670724c7 100644 --- a/clients/client-glue/src/models/models_0.ts +++ b/clients/client-glue/src/models/models_0.ts @@ -113,6 
+113,19 @@ export namespace AlreadyExistsException { }); } +export interface AuditContext { + AdditionalAuditContext?: string; +} + +export namespace AuditContext { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AuditContext): any => ({ + ...obj, + }); +} + /** *
            A column in a Table.
            */ @@ -308,6 +321,7 @@ export interface StorageDescriptor { */ Location?: string; + AdditionalLocations?: string[]; /** *
            The input format: SequenceFileInputFormat (binary), * or TextInputFormat, or a custom format.
            @@ -1125,6 +1139,20 @@ export namespace BatchGetCrawlersRequest { }); } +export interface LakeFormationConfiguration { + UseLakeFormationCredentials?: boolean; + AccountId?: string; +} + +export namespace LakeFormationConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LakeFormationConfiguration): any => ({ + ...obj, + }); +} + export enum LastCrawlStatus { CANCELLED = "CANCELLED", FAILED = "FAILED", @@ -1325,6 +1353,11 @@ export interface CatalogTarget { *
            A list of the tables to be synchronized.
            */ Tables: string[] | undefined; + + /** + *
            The name of the connection for an Amazon S3-backed Data Catalog table to be a target of the crawl when using a Catalog connection type paired with a NETWORK Connection type.
            + */ + ConnectionName?: string; } export namespace CatalogTarget { @@ -1336,6 +1369,35 @@ export namespace CatalogTarget { }); } +/** + *
            Specifies a Delta data store to crawl one or more Delta tables.
            + */ +export interface DeltaTarget { + /** + *
            A list of the Amazon S3 paths to the Delta tables.
            + */ + DeltaTables?: string[]; + + /** + *
            The name of the connection to use to connect to the Delta table target.
            + */ + ConnectionName?: string; + + /** + *
            Specifies whether to write the manifest files to the Delta table path.
            + */ + WriteManifest?: boolean; +} + +export namespace DeltaTarget { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeltaTarget): any => ({ + ...obj, + }); +} + /** *
            Specifies an Amazon DynamoDB table to crawl.
            */ @@ -1503,6 +1565,11 @@ export interface CrawlerTargets { *
            Specifies Glue Data Catalog targets.
            */ CatalogTargets?: CatalogTarget[]; + + /** + *
            Specifies Delta data store targets.
            + */ + DeltaTargets?: DeltaTarget[]; } export namespace CrawlerTargets { @@ -1621,6 +1688,8 @@ export interface Crawler { * crawler.
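For orientation, here is a minimal usage sketch (not part of this diff) of how the new `DeltaTargets` and `LakeFormationConfiguration` fields added in this change might be supplied through the existing `CreateCrawlerCommand`; the crawler name, role ARN, database, and S3 path below are placeholders.

```ts
import { CreateCrawlerCommand, GlueClient } from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

async function createDeltaCrawler(): Promise<void> {
  await client.send(
    new CreateCrawlerCommand({
      Name: "delta-orders-crawler", // placeholder crawler name
      Role: "arn:aws:iam::123456789012:role/GlueCrawlerRole", // placeholder IAM role
      DatabaseName: "delta_db", // placeholder catalog database
      Targets: {
        // New Delta Lake target type introduced in this change.
        DeltaTargets: [
          {
            DeltaTables: ["s3://example-bucket/delta/orders/"], // placeholder S3 path
            WriteManifest: true,
          },
        ],
      },
      // New optional Lake Formation setting, also introduced in this change.
      LakeFormationConfiguration: {
        UseLakeFormationCredentials: false,
      },
    })
  );
}
```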
            */ CrawlerSecurityConfiguration?: string; + + LakeFormationConfiguration?: LakeFormationConfiguration; } export namespace Crawler { @@ -4021,6 +4090,7 @@ export interface CreateCrawlerRequest { */ LineageConfiguration?: LineageConfiguration; + LakeFormationConfiguration?: LakeFormationConfiguration; /** *
            Crawler configuration information. This versioned JSON * string allows users to specify aspects of a crawler's behavior. @@ -8607,68 +8677,3 @@ export namespace CrawlerMetrics { ...obj, }); } - -export interface GetCrawlerMetricsResponse { - /** - *
            A list of metrics for the specified crawler.
            - */ - CrawlerMetricsList?: CrawlerMetrics[]; - - /** - *
            A continuation token, if the returned list does not contain the - * last metric available.
            - */ - NextToken?: string; -} - -export namespace GetCrawlerMetricsResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetCrawlerMetricsResponse): any => ({ - ...obj, - }); -} - -export interface GetCrawlersRequest { - /** - *
            The number of crawlers to return on each call.
            - */ - MaxResults?: number; - - /** - *
            A continuation token, if this is a continuation request.
            - */ - NextToken?: string; -} - -export namespace GetCrawlersRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetCrawlersRequest): any => ({ - ...obj, - }); -} - -export interface GetCrawlersResponse { - /** - *
            A list of crawler metadata.
            - */ - Crawlers?: Crawler[]; - - /** - *
            A continuation token, if the returned list has not reached the end - * of those defined in this customer account.
            - */ - NextToken?: string; -} - -export namespace GetCrawlersResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetCrawlersResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-glue/src/models/models_1.ts b/clients/client-glue/src/models/models_1.ts index e46735516b50..fc935035acf6 100644 --- a/clients/client-glue/src/models/models_1.ts +++ b/clients/client-glue/src/models/models_1.ts @@ -2,6 +2,7 @@ import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException import { Action, + AuditContext, CodeGenEdge, CodeGenNode, CodeGenNodeArg, @@ -10,6 +11,8 @@ import { Compatibility, ConnectionInput, ConnectionsList, + Crawler, + CrawlerMetrics, CrawlerTargets, CsvHeaderOption, DatabaseIdentifier, @@ -24,6 +27,7 @@ import { Job, JobCommand, JobRun, + LakeFormationConfiguration, Language, LineageConfiguration, NotificationProperty, @@ -55,6 +59,71 @@ import { WorkflowRun, } from "./models_0"; +export interface GetCrawlerMetricsResponse { + /** + *
            A list of metrics for the specified crawler.
            + */ + CrawlerMetricsList?: CrawlerMetrics[]; + + /** + *
            A continuation token, if the returned list does not contain the + * last metric available.
            + */ + NextToken?: string; +} + +export namespace GetCrawlerMetricsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetCrawlerMetricsResponse): any => ({ + ...obj, + }); +} + +export interface GetCrawlersRequest { + /** + *
            The number of crawlers to return on each call.
            + */ + MaxResults?: number; + + /** + *
            A continuation token, if this is a continuation request.
            + */ + NextToken?: string; +} + +export namespace GetCrawlersRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetCrawlersRequest): any => ({ + ...obj, + }); +} + +export interface GetCrawlersResponse { + /** + *
            A list of crawler metadata.
            + */ + Crawlers?: Crawler[]; + + /** + *
            A continuation token, if the returned list has not reached the end + * of those defined in this customer account.
            + */ + NextToken?: string; +} + +export namespace GetCrawlersResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetCrawlersResponse): any => ({ + ...obj, + }); +} + export interface GetDatabaseRequest { /** *
            The ID of the Data Catalog in which the database resides. If none is provided, the Amazon Web Services @@ -3546,6 +3615,173 @@ export namespace GetTriggersResponse { }); } +export enum PermissionType { + CELL_FILTER_PERMISSION = "CELL_FILTER_PERMISSION", + COLUMN_PERMISSION = "COLUMN_PERMISSION", +} + +export interface GetUnfilteredPartitionMetadataRequest { + CatalogId: string | undefined; + DatabaseName: string | undefined; + TableName: string | undefined; + PartitionValues: string[] | undefined; + AuditContext?: AuditContext; + SupportedPermissionTypes: (PermissionType | string)[] | undefined; +} + +export namespace GetUnfilteredPartitionMetadataRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUnfilteredPartitionMetadataRequest): any => ({ + ...obj, + }); +} + +export interface GetUnfilteredPartitionMetadataResponse { + /** + *
            Represents a slice of table data.
            + */ + Partition?: Partition; + + AuthorizedColumns?: string[]; + IsRegisteredWithLakeFormation?: boolean; +} + +export namespace GetUnfilteredPartitionMetadataResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUnfilteredPartitionMetadataResponse): any => ({ + ...obj, + }); +} + +export interface PermissionTypeMismatchException extends __SmithyException, $MetadataBearer { + name: "PermissionTypeMismatchException"; + $fault: "client"; + Message?: string; +} + +export namespace PermissionTypeMismatchException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PermissionTypeMismatchException): any => ({ + ...obj, + }); +} + +export interface GetUnfilteredPartitionsMetadataRequest { + CatalogId: string | undefined; + DatabaseName: string | undefined; + TableName: string | undefined; + Expression?: string; + AuditContext?: AuditContext; + SupportedPermissionTypes: (PermissionType | string)[] | undefined; + NextToken?: string; + /** + *
            Defines a non-overlapping region of a table's partitions, allowing + * multiple requests to be run in parallel.
            + */ + Segment?: Segment; + + MaxResults?: number; +} + +export namespace GetUnfilteredPartitionsMetadataRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUnfilteredPartitionsMetadataRequest): any => ({ + ...obj, + }); +} + +export interface UnfilteredPartition { + /** + *
            Represents a slice of table data.
            + */ + Partition?: Partition; + + AuthorizedColumns?: string[]; + IsRegisteredWithLakeFormation?: boolean; +} + +export namespace UnfilteredPartition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UnfilteredPartition): any => ({ + ...obj, + }); +} + +export interface GetUnfilteredPartitionsMetadataResponse { + UnfilteredPartitions?: UnfilteredPartition[]; + NextToken?: string; +} + +export namespace GetUnfilteredPartitionsMetadataResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUnfilteredPartitionsMetadataResponse): any => ({ + ...obj, + }); +} + +export interface GetUnfilteredTableMetadataRequest { + CatalogId: string | undefined; + DatabaseName: string | undefined; + Name: string | undefined; + AuditContext?: AuditContext; + SupportedPermissionTypes: (PermissionType | string)[] | undefined; +} + +export namespace GetUnfilteredTableMetadataRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUnfilteredTableMetadataRequest): any => ({ + ...obj, + }); +} + +export interface ColumnRowFilter { + ColumnName?: string; + RowFilterExpression?: string; +} + +export namespace ColumnRowFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ColumnRowFilter): any => ({ + ...obj, + }); +} + +export interface GetUnfilteredTableMetadataResponse { + /** + *
            Represents a collection of related data organized in columns and rows.
            + */ + Table?: Table; + + AuthorizedColumns?: string[]; + IsRegisteredWithLakeFormation?: boolean; + CellFilters?: ColumnRowFilter[]; +} + +export namespace GetUnfilteredTableMetadataResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUnfilteredTableMetadataResponse): any => ({ + ...obj, + }); +} + export interface GetUserDefinedFunctionRequest { /** *
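For orientation, a minimal usage sketch (not part of this diff) of the new unfiltered-metadata operations defined above, assuming the commands and the paginator added later in this change are re-exported from the package root like the other Glue commands; the account ID, database, table, and audit string are placeholders, and the permission type is one of the `PermissionType` values.

```ts
import {
  GetUnfilteredTableMetadataCommand,
  GlueClient,
  paginateGetUnfilteredPartitionsMetadata,
} from "@aws-sdk/client-glue";

const client = new GlueClient({ region: "us-east-1" });

async function inspectUnfilteredMetadata(): Promise<void> {
  // Fetch the caller's filtered view of a single table (placeholder identifiers).
  const table = await client.send(
    new GetUnfilteredTableMetadataCommand({
      CatalogId: "123456789012",
      DatabaseName: "sales_db",
      Name: "orders",
      SupportedPermissionTypes: ["COLUMN_PERMISSION"],
      AuditContext: { AdditionalAuditContext: "example-query-id" }, // optional
    })
  );
  console.log(table.AuthorizedColumns, table.IsRegisteredWithLakeFormation, table.CellFilters);

  // Page through partition metadata with the paginator added later in this change.
  for await (const page of paginateGetUnfilteredPartitionsMetadata(
    { client, pageSize: 100 },
    {
      CatalogId: "123456789012",
      DatabaseName: "sales_db",
      TableName: "orders",
      SupportedPermissionTypes: ["COLUMN_PERMISSION"],
    }
  )) {
    for (const partition of page.UnfilteredPartitions ?? []) {
      console.log(partition.AuthorizedColumns, partition.IsRegisteredWithLakeFormation);
    }
  }
}
```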
            The ID of the Data Catalog where the function to be retrieved is located. If none is @@ -6542,6 +6778,7 @@ export interface UpdateCrawlerRequest { */ LineageConfiguration?: LineageConfiguration; + LakeFormationConfiguration?: LakeFormationConfiguration; /** *
            Crawler configuration information. This versioned JSON string allows users * to specify aspects of a crawler's behavior. diff --git a/clients/client-glue/src/pagination/GetUnfilteredPartitionsMetadataPaginator.ts b/clients/client-glue/src/pagination/GetUnfilteredPartitionsMetadataPaginator.ts new file mode 100644 index 000000000000..bfafeca6138e --- /dev/null +++ b/clients/client-glue/src/pagination/GetUnfilteredPartitionsMetadataPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + GetUnfilteredPartitionsMetadataCommand, + GetUnfilteredPartitionsMetadataCommandInput, + GetUnfilteredPartitionsMetadataCommandOutput, +} from "../commands/GetUnfilteredPartitionsMetadataCommand"; +import { Glue } from "../Glue"; +import { GlueClient } from "../GlueClient"; +import { GluePaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: GlueClient, + input: GetUnfilteredPartitionsMetadataCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new GetUnfilteredPartitionsMetadataCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Glue, + input: GetUnfilteredPartitionsMetadataCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.getUnfilteredPartitionsMetadata(input, ...args); +}; +export async function* paginateGetUnfilteredPartitionsMetadata( + config: GluePaginationConfiguration, + input: GetUnfilteredPartitionsMetadataCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: GetUnfilteredPartitionsMetadataCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof Glue) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof GlueClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Glue | GlueClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-glue/src/pagination/index.ts b/clients/client-glue/src/pagination/index.ts index d20999b111bd..0362e143eb95 100644 --- a/clients/client-glue/src/pagination/index.ts +++ b/clients/client-glue/src/pagination/index.ts @@ -17,6 +17,7 @@ export * from "./GetSecurityConfigurationsPaginator"; export * from "./GetTableVersionsPaginator"; export * from "./GetTablesPaginator"; export * from "./GetTriggersPaginator"; +export * from "./GetUnfilteredPartitionsMetadataPaginator"; export * from "./GetUserDefinedFunctionsPaginator"; export * from "./GetWorkflowRunsPaginator"; export * from "./ListBlueprintsPaginator"; diff --git a/clients/client-glue/src/protocols/Aws_json1_1.ts b/clients/client-glue/src/protocols/Aws_json1_1.ts index 6ea24d4bd004..4e8522961d25 100644 --- a/clients/client-glue/src/protocols/Aws_json1_1.ts +++ b/clients/client-glue/src/protocols/Aws_json1_1.ts @@ -205,6 +205,18 @@ import { GetTableVersionsCommandInput, GetTableVersionsCommandOutput } from "../ import { GetTagsCommandInput, GetTagsCommandOutput } from "../commands/GetTagsCommand"; import { GetTriggerCommandInput, GetTriggerCommandOutput } from 
"../commands/GetTriggerCommand"; import { GetTriggersCommandInput, GetTriggersCommandOutput } from "../commands/GetTriggersCommand"; +import { + GetUnfilteredPartitionMetadataCommandInput, + GetUnfilteredPartitionMetadataCommandOutput, +} from "../commands/GetUnfilteredPartitionMetadataCommand"; +import { + GetUnfilteredPartitionsMetadataCommandInput, + GetUnfilteredPartitionsMetadataCommandOutput, +} from "../commands/GetUnfilteredPartitionsMetadataCommand"; +import { + GetUnfilteredTableMetadataCommandInput, + GetUnfilteredTableMetadataCommandOutput, +} from "../commands/GetUnfilteredTableMetadataCommand"; import { GetUserDefinedFunctionCommandInput, GetUserDefinedFunctionCommandOutput, @@ -330,6 +342,7 @@ import { AccessDeniedException, Action, AlreadyExistsException, + AuditContext, BatchCreatePartitionRequest, BatchCreatePartitionResponse, BatchDeleteConnectionRequest, @@ -487,6 +500,7 @@ import { DeleteUserDefinedFunctionResponse, DeleteWorkflowRequest, DeleteWorkflowResponse, + DeltaTarget, DevEndpoint, DoubleColumnStatisticsData, DynamoDBTarget, @@ -520,11 +534,8 @@ import { GetConnectionsRequest, GetConnectionsResponse, GetCrawlerMetricsRequest, - GetCrawlerMetricsResponse, GetCrawlerRequest, GetCrawlerResponse, - GetCrawlersRequest, - GetCrawlersResponse, GlueEncryptionException, GlueTable, GrokClassifier, @@ -539,6 +550,7 @@ import { JobNodeDetails, JobRun, JsonClassifier, + LakeFormationConfiguration, LastActiveDefinition, LastCrawlInfo, LineageConfiguration, @@ -597,6 +609,7 @@ import { BackfillError, CatalogEntry, ColumnImportance, + ColumnRowFilter, ColumnStatisticsError, ConcurrentRunsExceededException, ConfusionMatrix, @@ -611,6 +624,9 @@ import { ExportLabelsTaskRunProperties, FindMatchesMetrics, FindMatchesTaskRunProperties, + GetCrawlerMetricsResponse, + GetCrawlersRequest, + GetCrawlersResponse, GetDatabaseRequest, GetDatabaseResponse, GetDatabasesRequest, @@ -683,6 +699,12 @@ import { GetTriggerResponse, GetTriggersRequest, GetTriggersResponse, + GetUnfilteredPartitionMetadataRequest, + GetUnfilteredPartitionMetadataResponse, + GetUnfilteredPartitionsMetadataRequest, + GetUnfilteredPartitionsMetadataResponse, + GetUnfilteredTableMetadataRequest, + GetUnfilteredTableMetadataResponse, GetUserDefinedFunctionRequest, GetUserDefinedFunctionResponse, GetUserDefinedFunctionsRequest, @@ -734,6 +756,8 @@ import { NoScheduleException, OtherMetadataValueListItem, PartitionIndexDescriptor, + PermissionType, + PermissionTypeMismatchException, PropertyPredicate, PutDataCatalogEncryptionSettingsRequest, PutDataCatalogEncryptionSettingsResponse, @@ -804,6 +828,7 @@ import { TransformFilterCriteria, TransformSortCriteria, TriggerUpdate, + UnfilteredPartition, UntagResourceRequest, UntagResourceResponse, UpdateBlueprintRequest, @@ -2215,6 +2240,45 @@ export const serializeAws_json1_1GetTriggersCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1GetUnfilteredPartitionMetadataCommand = async ( + input: GetUnfilteredPartitionMetadataCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSGlue.GetUnfilteredPartitionMetadata", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetUnfilteredPartitionMetadataRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_1GetUnfilteredPartitionsMetadataCommand = async ( + 
input: GetUnfilteredPartitionsMetadataCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSGlue.GetUnfilteredPartitionsMetadata", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetUnfilteredPartitionsMetadataRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_1GetUnfilteredTableMetadataCommand = async ( + input: GetUnfilteredTableMetadataCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSGlue.GetUnfilteredTableMetadata", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetUnfilteredTableMetadataRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1GetUserDefinedFunctionCommand = async ( input: GetUserDefinedFunctionCommandInput, context: __SerdeContext @@ -11454,6 +11518,288 @@ const deserializeAws_json1_1GetTriggersCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1GetUnfilteredPartitionMetadataCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetUnfilteredPartitionMetadataCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetUnfilteredPartitionMetadataResponse(data, context); + const response: GetUnfilteredPartitionMetadataCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetUnfilteredPartitionMetadataCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.glue#EntityNotFoundException": + response = { + ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "GlueEncryptionException": + case "com.amazonaws.glue#GlueEncryptionException": + response = { + ...(await deserializeAws_json1_1GlueEncryptionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.glue#InternalServiceException": + response = { + ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.glue#InvalidInputException": + response = { + ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.glue#OperationTimeoutException": + response = { + ...(await 
deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "PermissionTypeMismatchException": + case "com.amazonaws.glue#PermissionTypeMismatchException": + response = { + ...(await deserializeAws_json1_1PermissionTypeMismatchExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1GetUnfilteredPartitionsMetadataCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetUnfilteredPartitionsMetadataCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetUnfilteredPartitionsMetadataResponse(data, context); + const response: GetUnfilteredPartitionsMetadataCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetUnfilteredPartitionsMetadataCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.glue#EntityNotFoundException": + response = { + ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "GlueEncryptionException": + case "com.amazonaws.glue#GlueEncryptionException": + response = { + ...(await deserializeAws_json1_1GlueEncryptionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.glue#InternalServiceException": + response = { + ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.glue#InvalidInputException": + response = { + ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.glue#OperationTimeoutException": + response = { + ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "PermissionTypeMismatchException": + case "com.amazonaws.glue#PermissionTypeMismatchException": + response = { + ...(await 
deserializeAws_json1_1PermissionTypeMismatchExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1GetUnfilteredTableMetadataCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetUnfilteredTableMetadataCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetUnfilteredTableMetadataResponse(data, context); + const response: GetUnfilteredTableMetadataCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetUnfilteredTableMetadataCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.glue#EntityNotFoundException": + response = { + ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "GlueEncryptionException": + case "com.amazonaws.glue#GlueEncryptionException": + response = { + ...(await deserializeAws_json1_1GlueEncryptionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.glue#InternalServiceException": + response = { + ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.glue#InvalidInputException": + response = { + ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.glue#OperationTimeoutException": + response = { + ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "PermissionTypeMismatchException": + case "com.amazonaws.glue#PermissionTypeMismatchException": + response = { + ...(await deserializeAws_json1_1PermissionTypeMismatchExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: 
parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1GetUserDefinedFunctionCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -16784,6 +17130,21 @@ const deserializeAws_json1_1OperationTimeoutExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1PermissionTypeMismatchExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1PermissionTypeMismatchException(body, context); + const contents: PermissionTypeMismatchException = { + name: "PermissionTypeMismatchException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1ResourceNotReadyExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -16931,6 +17292,13 @@ const serializeAws_json1_1AdditionalPlanOptionsMap = ( }, {}); }; +const serializeAws_json1_1AuditContext = (input: AuditContext, context: __SerdeContext): any => { + return { + ...(input.AdditionalAuditContext !== undefined && + input.AdditionalAuditContext !== null && { AdditionalAuditContext: input.AdditionalAuditContext }), + }; +}; + const serializeAws_json1_1BatchCreatePartitionRequest = ( input: BatchCreatePartitionRequest, context: __SerdeContext @@ -17275,6 +17643,8 @@ const serializeAws_json1_1CatalogTablesList = (input: string[], context: __Serde const serializeAws_json1_1CatalogTarget = (input: CatalogTarget, context: __SerdeContext): any => { return { + ...(input.ConnectionName !== undefined && + input.ConnectionName !== null && { ConnectionName: input.ConnectionName }), ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), ...(input.Tables !== undefined && input.Tables !== null && { Tables: serializeAws_json1_1CatalogTablesList(input.Tables, context) }), @@ -17554,6 +17924,10 @@ const serializeAws_json1_1CrawlerTargets = (input: CrawlerTargets, context: __Se input.CatalogTargets !== null && { CatalogTargets: serializeAws_json1_1CatalogTargetList(input.CatalogTargets, context), }), + ...(input.DeltaTargets !== undefined && + input.DeltaTargets !== null && { + DeltaTargets: serializeAws_json1_1DeltaTargetList(input.DeltaTargets, context), + }), ...(input.DynamoDBTargets !== undefined && input.DynamoDBTargets !== null && { DynamoDBTargets: serializeAws_json1_1DynamoDBTargetList(input.DynamoDBTargets, context), @@ -17624,6 +17998,13 @@ const serializeAws_json1_1CreateCrawlerRequest = (input: CreateCrawlerRequest, c }), ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), ...(input.Description !== undefined && input.Description !== null && { Description: input.Description }), + ...(input.LakeFormationConfiguration !== undefined && + input.LakeFormationConfiguration !== null && { + LakeFormationConfiguration: serializeAws_json1_1LakeFormationConfiguration( + input.LakeFormationConfiguration, + context + ), + }), ...(input.LineageConfiguration !== undefined && input.LineageConfiguration !== null && { LineageConfiguration: serializeAws_json1_1LineageConfiguration(input.LineageConfiguration, 
context), @@ -18284,6 +18665,27 @@ const serializeAws_json1_1DeleteWorkflowRequest = (input: DeleteWorkflowRequest, }; }; +const serializeAws_json1_1DeltaTarget = (input: DeltaTarget, context: __SerdeContext): any => { + return { + ...(input.ConnectionName !== undefined && + input.ConnectionName !== null && { ConnectionName: input.ConnectionName }), + ...(input.DeltaTables !== undefined && + input.DeltaTables !== null && { DeltaTables: serializeAws_json1_1PathList(input.DeltaTables, context) }), + ...(input.WriteManifest !== undefined && input.WriteManifest !== null && { WriteManifest: input.WriteManifest }), + }; +}; + +const serializeAws_json1_1DeltaTargetList = (input: DeltaTarget[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1DeltaTarget(entry, context); + }); +}; + const serializeAws_json1_1DevEndpointCustomLibraries = ( input: DevEndpointCustomLibraries, context: __SerdeContext @@ -18907,6 +19309,66 @@ const serializeAws_json1_1GetTriggersRequest = (input: GetTriggersRequest, conte }; }; +const serializeAws_json1_1GetUnfilteredPartitionMetadataRequest = ( + input: GetUnfilteredPartitionMetadataRequest, + context: __SerdeContext +): any => { + return { + ...(input.AuditContext !== undefined && + input.AuditContext !== null && { AuditContext: serializeAws_json1_1AuditContext(input.AuditContext, context) }), + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.PartitionValues !== undefined && + input.PartitionValues !== null && { + PartitionValues: serializeAws_json1_1ValueStringList(input.PartitionValues, context), + }), + ...(input.SupportedPermissionTypes !== undefined && + input.SupportedPermissionTypes !== null && { + SupportedPermissionTypes: serializeAws_json1_1PermissionTypeList(input.SupportedPermissionTypes, context), + }), + ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + }; +}; + +const serializeAws_json1_1GetUnfilteredPartitionsMetadataRequest = ( + input: GetUnfilteredPartitionsMetadataRequest, + context: __SerdeContext +): any => { + return { + ...(input.AuditContext !== undefined && + input.AuditContext !== null && { AuditContext: serializeAws_json1_1AuditContext(input.AuditContext, context) }), + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.Expression !== undefined && input.Expression !== null && { Expression: input.Expression }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.Segment !== undefined && + input.Segment !== null && { Segment: serializeAws_json1_1Segment(input.Segment, context) }), + ...(input.SupportedPermissionTypes !== undefined && + input.SupportedPermissionTypes !== null && { + SupportedPermissionTypes: serializeAws_json1_1PermissionTypeList(input.SupportedPermissionTypes, context), + }), + ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + }; +}; + +const serializeAws_json1_1GetUnfilteredTableMetadataRequest = 
( + input: GetUnfilteredTableMetadataRequest, + context: __SerdeContext +): any => { + return { + ...(input.AuditContext !== undefined && + input.AuditContext !== null && { AuditContext: serializeAws_json1_1AuditContext(input.AuditContext, context) }), + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.SupportedPermissionTypes !== undefined && + input.SupportedPermissionTypes !== null && { + SupportedPermissionTypes: serializeAws_json1_1PermissionTypeList(input.SupportedPermissionTypes, context), + }), + }; +}; + const serializeAws_json1_1GetUserDefinedFunctionRequest = ( input: GetUserDefinedFunctionRequest, context: __SerdeContext @@ -19095,6 +19557,17 @@ const serializeAws_json1_1KeyList = (input: string[], context: __SerdeContext): }); }; +const serializeAws_json1_1LakeFormationConfiguration = ( + input: LakeFormationConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.AccountId !== undefined && input.AccountId !== null && { AccountId: input.AccountId }), + ...(input.UseLakeFormationCredentials !== undefined && + input.UseLakeFormationCredentials !== null && { UseLakeFormationCredentials: input.UseLakeFormationCredentials }), + }; +}; + const serializeAws_json1_1LineageConfiguration = (input: LineageConfiguration, context: __SerdeContext): any => { return { ...(input.CrawlerLineageSettings !== undefined && @@ -19210,6 +19683,17 @@ const serializeAws_json1_1LocationMap = (input: { [key: string]: string }, conte }, {}); }; +const serializeAws_json1_1LocationStringList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_json1_1LongColumnStatisticsData = ( input: LongColumnStatisticsData, context: __SerdeContext @@ -19459,6 +19943,17 @@ const serializeAws_json1_1PermissionList = (input: (Permission | string)[], cont }); }; +const serializeAws_json1_1PermissionTypeList = (input: (PermissionType | string)[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_json1_1PhysicalConnectionRequirements = ( input: PhysicalConnectionRequirements, context: __SerdeContext @@ -19996,6 +20491,10 @@ const serializeAws_json1_1StopWorkflowRunRequest = (input: StopWorkflowRunReques const serializeAws_json1_1StorageDescriptor = (input: StorageDescriptor, context: __SerdeContext): any => { return { + ...(input.AdditionalLocations !== undefined && + input.AdditionalLocations !== null && { + AdditionalLocations: serializeAws_json1_1LocationStringList(input.AdditionalLocations, context), + }), ...(input.BucketColumns !== undefined && input.BucketColumns !== null && { BucketColumns: serializeAws_json1_1NameStringList(input.BucketColumns, context), @@ -20331,6 +20830,13 @@ const serializeAws_json1_1UpdateCrawlerRequest = (input: UpdateCrawlerRequest, c }), ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), ...(input.Description !== undefined && input.Description !== null && { Description: input.Description }), + ...(input.LakeFormationConfiguration !== undefined && + 
input.LakeFormationConfiguration !== null && { + LakeFormationConfiguration: serializeAws_json1_1LakeFormationConfiguration( + input.LakeFormationConfiguration, + context + ), + }), ...(input.LineageConfiguration !== undefined && input.LineageConfiguration !== null && { LineageConfiguration: serializeAws_json1_1LineageConfiguration(input.LineageConfiguration, context), @@ -21146,6 +21652,7 @@ const deserializeAws_json1_1CatalogTablesList = (output: any, context: __SerdeCo const deserializeAws_json1_1CatalogTarget = (output: any, context: __SerdeContext): CatalogTarget => { return { + ConnectionName: __expectString(output.ConnectionName), DatabaseName: __expectString(output.DatabaseName), Tables: output.Tables !== undefined && output.Tables !== null @@ -21326,6 +21833,24 @@ const deserializeAws_json1_1ColumnList = (output: any, context: __SerdeContext): }); }; +const deserializeAws_json1_1ColumnRowFilter = (output: any, context: __SerdeContext): ColumnRowFilter => { + return { + ColumnName: __expectString(output.ColumnName), + RowFilterExpression: __expectString(output.RowFilterExpression), + } as any; +}; + +const deserializeAws_json1_1ColumnRowFilterList = (output: any, context: __SerdeContext): ColumnRowFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1ColumnRowFilter(entry, context); + }); +}; + const deserializeAws_json1_1ColumnStatistics = (output: any, context: __SerdeContext): ColumnStatistics => { return { AnalyzedTime: @@ -21596,6 +22121,10 @@ const deserializeAws_json1_1Crawler = (output: any, context: __SerdeContext): Cr : undefined, DatabaseName: __expectString(output.DatabaseName), Description: __expectString(output.Description), + LakeFormationConfiguration: + output.LakeFormationConfiguration !== undefined && output.LakeFormationConfiguration !== null + ? deserializeAws_json1_1LakeFormationConfiguration(output.LakeFormationConfiguration, context) + : undefined, LastCrawl: output.LastCrawl !== undefined && output.LastCrawl !== null ? deserializeAws_json1_1LastCrawlInfo(output.LastCrawl, context) @@ -21720,6 +22249,10 @@ const deserializeAws_json1_1CrawlerTargets = (output: any, context: __SerdeConte output.CatalogTargets !== undefined && output.CatalogTargets !== null ? deserializeAws_json1_1CatalogTargetList(output.CatalogTargets, context) : undefined, + DeltaTargets: + output.DeltaTargets !== undefined && output.DeltaTargets !== null + ? deserializeAws_json1_1DeltaTargetList(output.DeltaTargets, context) + : undefined, DynamoDBTargets: output.DynamoDBTargets !== undefined && output.DynamoDBTargets !== null ? deserializeAws_json1_1DynamoDBTargetList(output.DynamoDBTargets, context) @@ -22244,6 +22777,28 @@ const deserializeAws_json1_1DeleteWorkflowResponse = (output: any, context: __Se } as any; }; +const deserializeAws_json1_1DeltaTarget = (output: any, context: __SerdeContext): DeltaTarget => { + return { + ConnectionName: __expectString(output.ConnectionName), + DeltaTables: + output.DeltaTables !== undefined && output.DeltaTables !== null + ? 
deserializeAws_json1_1PathList(output.DeltaTables, context) + : undefined, + WriteManifest: __expectBoolean(output.WriteManifest), + } as any; +}; + +const deserializeAws_json1_1DeltaTargetList = (output: any, context: __SerdeContext): DeltaTarget[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1DeltaTarget(entry, context); + }); +}; + const deserializeAws_json1_1DevEndpoint = (output: any, context: __SerdeContext): DevEndpoint => { return { Arguments: @@ -23134,6 +23689,57 @@ const deserializeAws_json1_1GetTriggersResponse = (output: any, context: __Serde } as any; }; +const deserializeAws_json1_1GetUnfilteredPartitionMetadataResponse = ( + output: any, + context: __SerdeContext +): GetUnfilteredPartitionMetadataResponse => { + return { + AuthorizedColumns: + output.AuthorizedColumns !== undefined && output.AuthorizedColumns !== null + ? deserializeAws_json1_1NameStringList(output.AuthorizedColumns, context) + : undefined, + IsRegisteredWithLakeFormation: __expectBoolean(output.IsRegisteredWithLakeFormation), + Partition: + output.Partition !== undefined && output.Partition !== null + ? deserializeAws_json1_1Partition(output.Partition, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1GetUnfilteredPartitionsMetadataResponse = ( + output: any, + context: __SerdeContext +): GetUnfilteredPartitionsMetadataResponse => { + return { + NextToken: __expectString(output.NextToken), + UnfilteredPartitions: + output.UnfilteredPartitions !== undefined && output.UnfilteredPartitions !== null + ? deserializeAws_json1_1UnfilteredPartitionList(output.UnfilteredPartitions, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1GetUnfilteredTableMetadataResponse = ( + output: any, + context: __SerdeContext +): GetUnfilteredTableMetadataResponse => { + return { + AuthorizedColumns: + output.AuthorizedColumns !== undefined && output.AuthorizedColumns !== null + ? deserializeAws_json1_1NameStringList(output.AuthorizedColumns, context) + : undefined, + CellFilters: + output.CellFilters !== undefined && output.CellFilters !== null + ? deserializeAws_json1_1ColumnRowFilterList(output.CellFilters, context) + : undefined, + IsRegisteredWithLakeFormation: __expectBoolean(output.IsRegisteredWithLakeFormation), + Table: + output.Table !== undefined && output.Table !== null + ? 
deserializeAws_json1_1Table(output.Table, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1GetUserDefinedFunctionResponse = ( output: any, context: __SerdeContext @@ -23557,6 +24163,16 @@ const deserializeAws_json1_1LabelingSetGenerationTaskRunProperties = ( } as any; }; +const deserializeAws_json1_1LakeFormationConfiguration = ( + output: any, + context: __SerdeContext +): LakeFormationConfiguration => { + return { + AccountId: __expectString(output.AccountId), + UseLakeFormationCredentials: __expectBoolean(output.UseLakeFormationCredentials), + } as any; +}; + const deserializeAws_json1_1LastActiveDefinition = (output: any, context: __SerdeContext): LastActiveDefinition => { return { BlueprintLocation: __expectString(output.BlueprintLocation), @@ -23711,6 +24327,17 @@ const deserializeAws_json1_1LocationMap = (output: any, context: __SerdeContext) }, {}); }; +const deserializeAws_json1_1LocationStringList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + const deserializeAws_json1_1LongColumnStatisticsData = ( output: any, context: __SerdeContext @@ -24143,6 +24770,15 @@ const deserializeAws_json1_1PermissionList = (output: any, context: __SerdeConte }); }; +const deserializeAws_json1_1PermissionTypeMismatchException = ( + output: any, + context: __SerdeContext +): PermissionTypeMismatchException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1PhysicalConnectionRequirements = ( output: any, context: __SerdeContext @@ -24760,6 +25396,10 @@ const deserializeAws_json1_1StopWorkflowRunResponse = ( const deserializeAws_json1_1StorageDescriptor = (output: any, context: __SerdeContext): StorageDescriptor => { return { + AdditionalLocations: + output.AdditionalLocations !== undefined && output.AdditionalLocations !== null + ? deserializeAws_json1_1LocationStringList(output.AdditionalLocations, context) + : undefined, BucketColumns: output.BucketColumns !== undefined && output.BucketColumns !== null ? deserializeAws_json1_1NameStringList(output.BucketColumns, context) @@ -25128,6 +25768,31 @@ const deserializeAws_json1_1TriggerNodeDetails = (output: any, context: __SerdeC } as any; }; +const deserializeAws_json1_1UnfilteredPartition = (output: any, context: __SerdeContext): UnfilteredPartition => { + return { + AuthorizedColumns: + output.AuthorizedColumns !== undefined && output.AuthorizedColumns !== null + ? deserializeAws_json1_1NameStringList(output.AuthorizedColumns, context) + : undefined, + IsRegisteredWithLakeFormation: __expectBoolean(output.IsRegisteredWithLakeFormation), + Partition: + output.Partition !== undefined && output.Partition !== null + ? 
deserializeAws_json1_1Partition(output.Partition, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1UnfilteredPartitionList = (output: any, context: __SerdeContext): UnfilteredPartition[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1UnfilteredPartition(entry, context); + }); +}; + const deserializeAws_json1_1UntagResourceResponse = (output: any, context: __SerdeContext): UntagResourceResponse => { return {} as any; }; diff --git a/clients/client-greengrassv2/README.md b/clients/client-greengrassv2/README.md index c6bc3d6ecdb2..ee1e61545756 100644 --- a/clients/client-greengrassv2/README.md +++ b/clients/client-greengrassv2/README.md @@ -34,16 +34,16 @@ using your favorite package manager: The AWS SDK is modulized by clients and commands. To send a request, you only need to import the `GreengrassV2Client` and -the commands you need, for example `BatchAssociateClientDeviceWithCoreDeviceCommand`: +the commands you need, for example `AssociateServiceRoleToAccountCommand`: ```js // ES5 example -const { GreengrassV2Client, BatchAssociateClientDeviceWithCoreDeviceCommand } = require("@aws-sdk/client-greengrassv2"); +const { GreengrassV2Client, AssociateServiceRoleToAccountCommand } = require("@aws-sdk/client-greengrassv2"); ``` ```ts // ES6+ example -import { GreengrassV2Client, BatchAssociateClientDeviceWithCoreDeviceCommand } from "@aws-sdk/client-greengrassv2"; +import { GreengrassV2Client, AssociateServiceRoleToAccountCommand } from "@aws-sdk/client-greengrassv2"; ``` ### Usage @@ -62,7 +62,7 @@ const client = new GreengrassV2Client({ region: "REGION" }); const params = { /** input parameters */ }; -const command = new BatchAssociateClientDeviceWithCoreDeviceCommand(params); +const command = new AssociateServiceRoleToAccountCommand(params); ``` #### Async/await @@ -141,7 +141,7 @@ const client = new AWS.GreengrassV2({ region: "REGION" }); // async/await. try { - const data = await client.batchAssociateClientDeviceWithCoreDevice(params); + const data = await client.associateServiceRoleToAccount(params); // process data. } catch (error) { // error handling. @@ -149,7 +149,7 @@ try { // Promises. client - .batchAssociateClientDeviceWithCoreDevice(params) + .associateServiceRoleToAccount(params) .then((data) => { // process data. }) @@ -158,7 +158,7 @@ client }); // callbacks. -client.batchAssociateClientDeviceWithCoreDevice(params, (err, data) => { +client.associateServiceRoleToAccount(params, (err, data) => { // proccess err and data. 
}); ``` diff --git a/clients/client-greengrassv2/src/GreengrassV2.ts b/clients/client-greengrassv2/src/GreengrassV2.ts index 331be1dc3c4a..4f0cf794390c 100644 --- a/clients/client-greengrassv2/src/GreengrassV2.ts +++ b/clients/client-greengrassv2/src/GreengrassV2.ts @@ -1,5 +1,10 @@ import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; +import { + AssociateServiceRoleToAccountCommand, + AssociateServiceRoleToAccountCommandInput, + AssociateServiceRoleToAccountCommandOutput, +} from "./commands/AssociateServiceRoleToAccountCommand"; import { BatchAssociateClientDeviceWithCoreDeviceCommand, BatchAssociateClientDeviceWithCoreDeviceCommandInput, @@ -40,6 +45,11 @@ import { DescribeComponentCommandInput, DescribeComponentCommandOutput, } from "./commands/DescribeComponentCommand"; +import { + DisassociateServiceRoleFromAccountCommand, + DisassociateServiceRoleFromAccountCommandInput, + DisassociateServiceRoleFromAccountCommandOutput, +} from "./commands/DisassociateServiceRoleFromAccountCommand"; import { GetComponentCommand, GetComponentCommandInput, @@ -50,6 +60,11 @@ import { GetComponentVersionArtifactCommandInput, GetComponentVersionArtifactCommandOutput, } from "./commands/GetComponentVersionArtifactCommand"; +import { + GetConnectivityInfoCommand, + GetConnectivityInfoCommandInput, + GetConnectivityInfoCommandOutput, +} from "./commands/GetConnectivityInfoCommand"; import { GetCoreDeviceCommand, GetCoreDeviceCommandInput, @@ -60,6 +75,11 @@ import { GetDeploymentCommandInput, GetDeploymentCommandOutput, } from "./commands/GetDeploymentCommand"; +import { + GetServiceRoleForAccountCommand, + GetServiceRoleForAccountCommandInput, + GetServiceRoleForAccountCommandOutput, +} from "./commands/GetServiceRoleForAccountCommand"; import { ListClientDevicesAssociatedWithCoreDeviceCommand, ListClientDevicesAssociatedWithCoreDeviceCommandInput, @@ -111,6 +131,11 @@ import { UntagResourceCommandInput, UntagResourceCommandOutput, } from "./commands/UntagResourceCommand"; +import { + UpdateConnectivityInfoCommand, + UpdateConnectivityInfoCommandInput, + UpdateConnectivityInfoCommandOutput, +} from "./commands/UpdateConnectivityInfoCommand"; import { GreengrassV2Client } from "./GreengrassV2Client"; /** @@ -128,7 +153,43 @@ import { GreengrassV2Client } from "./GreengrassV2Client"; */ export class GreengrassV2 extends GreengrassV2Client { /** - *

            Associate a list of client devices with a core device. Use this API operation to specify + *

            Associates a Greengrass service role with IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region. IoT Greengrass + * uses this role to verify the identity of client devices and manage core device connectivity + * information. The role must include the AWSGreengrassResourceAccessRolePolicy managed policy or a custom policy that + * defines equivalent permissions for the IoT Greengrass features that you use. For more information, see + * Greengrass service role in the IoT Greengrass Version 2 Developer Guide.
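+ * @example
+ * A minimal usage sketch (illustrative only, not generated documentation). It assumes default
+ * credential resolution; the region, account ID, and role name below are placeholders.
+ * ```javascript
+ * import { GreengrassV2 } from "@aws-sdk/client-greengrassv2";
+ *
+ * const client = new GreengrassV2({ region: "REGION" });
+ * // roleArn is the only field on the request shape.
+ * const response = await client.associateServiceRoleToAccount({
+ *   roleArn: "arn:aws:iam::123456789012:role/MyGreengrassServiceRole",
+ * });
+ * // response.associatedAt reports when the role was associated in this Region.
+ * ```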

            + */ + public associateServiceRoleToAccount( + args: AssociateServiceRoleToAccountCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public associateServiceRoleToAccount( + args: AssociateServiceRoleToAccountCommandInput, + cb: (err: any, data?: AssociateServiceRoleToAccountCommandOutput) => void + ): void; + public associateServiceRoleToAccount( + args: AssociateServiceRoleToAccountCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AssociateServiceRoleToAccountCommandOutput) => void + ): void; + public associateServiceRoleToAccount( + args: AssociateServiceRoleToAccountCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: AssociateServiceRoleToAccountCommandOutput) => void), + cb?: (err: any, data?: AssociateServiceRoleToAccountCommandOutput) => void + ): Promise | void { + const command = new AssociateServiceRoleToAccountCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

            Associates a list of client devices with a core device. Use this API operation to specify * which client devices can discover a core device through cloud discovery. With cloud discovery, * client devices connect to IoT Greengrass to retrieve associated core devices' connectivity information * and certificates. For more information, see Configure cloud @@ -174,7 +235,7 @@ export class GreengrassV2 extends GreengrassV2Client { } /** - *

            Disassociate a list of client devices from a core device. After you disassociate a client + *

            Disassociates a list of client devices from a core device. After you disassociate a client * device from a core device, the client device won't be able to use cloud discovery to retrieve * the core device's connectivity information and certificates.

            */ @@ -478,6 +539,41 @@ export class GreengrassV2 extends GreengrassV2Client { } } + /** + *

            Disassociates the Greengrass service role from IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region. + * Without a service role, IoT Greengrass can't verify the identity of client devices or manage core device + * connectivity information. For more information, see Greengrass service role in + * the IoT Greengrass Version 2 Developer Guide.
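+ * @example
+ * A minimal usage sketch (illustrative only, not generated documentation), assuming a client
+ * configured with credentials and a region:
+ * ```javascript
+ * import { GreengrassV2 } from "@aws-sdk/client-greengrassv2";
+ *
+ * const client = new GreengrassV2({ region: "REGION" });
+ * // The request shape has no fields, so an empty object is passed.
+ * const response = await client.disassociateServiceRoleFromAccount({});
+ * // response.disassociatedAt reports when the role was disassociated in this Region.
+ * ```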

            + */ + public disassociateServiceRoleFromAccount( + args: DisassociateServiceRoleFromAccountCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disassociateServiceRoleFromAccount( + args: DisassociateServiceRoleFromAccountCommandInput, + cb: (err: any, data?: DisassociateServiceRoleFromAccountCommandOutput) => void + ): void; + public disassociateServiceRoleFromAccount( + args: DisassociateServiceRoleFromAccountCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisassociateServiceRoleFromAccountCommandOutput) => void + ): void; + public disassociateServiceRoleFromAccount( + args: DisassociateServiceRoleFromAccountCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisassociateServiceRoleFromAccountCommandOutput) => void), + cb?: (err: any, data?: DisassociateServiceRoleFromAccountCommandOutput) => void + ): Promise | void { + const command = new DisassociateServiceRoleFromAccountCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

            Gets the recipe for a version of a component. Core devices can call this operation to * identify the artifacts and requirements to install a component.

            @@ -541,6 +637,44 @@ export class GreengrassV2 extends GreengrassV2Client { } } + /** + *

            Retrieves connectivity information for a Greengrass core device.

            + *

            Connectivity information includes endpoints and ports where client devices + * can connect to an MQTT broker on the core device. When a client device + * calls the Greengrass discovery API, + * IoT Greengrass returns connectivity information for all of the core devices where the client device can + * connect. For more information, see Connect client devices to + * core devices in the IoT Greengrass Version 2 Developer Guide.
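+ * @example
+ * A minimal usage sketch (illustrative only, not generated documentation); the thing name below
+ * is a placeholder for an existing core device:
+ * ```javascript
+ * import { GreengrassV2 } from "@aws-sdk/client-greengrassv2";
+ *
+ * const client = new GreengrassV2({ region: "REGION" });
+ * const response = await client.getConnectivityInfo({ thingName: "MyGreengrassCore" });
+ * // response.connectivityInfo is a list of { id, hostAddress, portNumber, metadata } entries,
+ * // and response.message describes the result of the request.
+ * ```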

            + */ + public getConnectivityInfo( + args: GetConnectivityInfoCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getConnectivityInfo( + args: GetConnectivityInfoCommandInput, + cb: (err: any, data?: GetConnectivityInfoCommandOutput) => void + ): void; + public getConnectivityInfo( + args: GetConnectivityInfoCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetConnectivityInfoCommandOutput) => void + ): void; + public getConnectivityInfo( + args: GetConnectivityInfoCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetConnectivityInfoCommandOutput) => void), + cb?: (err: any, data?: GetConnectivityInfoCommandOutput) => void + ): Promise | void { + const command = new GetConnectivityInfoCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

            Retrieves metadata for a Greengrass core device.

            */ @@ -605,6 +739,41 @@ export class GreengrassV2 extends GreengrassV2Client { } } + /** + *

            Gets the service role associated with IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region. + * IoT Greengrass uses this role to verify the identity of client devices and manage core device + * connectivity information. For more information, see Greengrass service role in + * the IoT Greengrass Version 2 Developer Guide.
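+ * @example
+ * A minimal usage sketch (illustrative only, not generated documentation), assuming a configured
+ * client:
+ * ```javascript
+ * import { GreengrassV2 } from "@aws-sdk/client-greengrassv2";
+ *
+ * const client = new GreengrassV2({ region: "REGION" });
+ * // The request shape has no fields, so an empty object is passed.
+ * const response = await client.getServiceRoleForAccount({});
+ * // response.roleArn is the ARN of the associated role; response.associatedAt is the association time.
+ * ```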

            + */ + public getServiceRoleForAccount( + args: GetServiceRoleForAccountCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getServiceRoleForAccount( + args: GetServiceRoleForAccountCommandInput, + cb: (err: any, data?: GetServiceRoleForAccountCommandOutput) => void + ): void; + public getServiceRoleForAccount( + args: GetServiceRoleForAccountCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetServiceRoleForAccountCommandOutput) => void + ): void; + public getServiceRoleForAccount( + args: GetServiceRoleForAccountCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetServiceRoleForAccountCommandOutput) => void), + cb?: (err: any, data?: GetServiceRoleForAccountCommandOutput) => void + ): Promise | void { + const command = new GetServiceRoleForAccountCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

            Retrieves a paginated list of client devices that are associated with a core * device.

            @@ -970,4 +1139,42 @@ export class GreengrassV2 extends GreengrassV2Client { return this.send(command, optionsOrCb); } } + + /** + *

            Updates connectivity information for a Greengrass core device.

            + *

            Connectivity information includes endpoints and ports where client devices + * can connect to an MQTT broker on the core device. When a client device + * calls the Greengrass discovery API, + * IoT Greengrass returns connectivity information for all of the core devices where the client device can + * connect. For more information, see Connect client devices to + * core devices in the IoT Greengrass Version 2 Developer Guide.
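+ * @example
+ * A hedged usage sketch (illustrative only, not generated documentation). It assumes the request
+ * takes the core device thing name plus a connectivityInfo list that mirrors the ConnectivityInfo
+ * shape returned by GetConnectivityInfo; the thing name and addresses below are placeholders.
+ * ```javascript
+ * import { GreengrassV2 } from "@aws-sdk/client-greengrassv2";
+ *
+ * const client = new GreengrassV2({ region: "REGION" });
+ * const response = await client.updateConnectivityInfo({
+ *   thingName: "MyGreengrassCore",
+ *   connectivityInfo: [
+ *     // Port 8883 is the typical default for the MQTT broker component on core devices.
+ *     { id: "localIp", hostAddress: "192.168.1.10", portNumber: 8883, metadata: "" },
+ *   ],
+ * });
+ * ```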

            + */ + public updateConnectivityInfo( + args: UpdateConnectivityInfoCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateConnectivityInfo( + args: UpdateConnectivityInfoCommandInput, + cb: (err: any, data?: UpdateConnectivityInfoCommandOutput) => void + ): void; + public updateConnectivityInfo( + args: UpdateConnectivityInfoCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateConnectivityInfoCommandOutput) => void + ): void; + public updateConnectivityInfo( + args: UpdateConnectivityInfoCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateConnectivityInfoCommandOutput) => void), + cb?: (err: any, data?: UpdateConnectivityInfoCommandOutput) => void + ): Promise | void { + const command = new UpdateConnectivityInfoCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } } diff --git a/clients/client-greengrassv2/src/GreengrassV2Client.ts b/clients/client-greengrassv2/src/GreengrassV2Client.ts index 5b10ec439aa5..cd61be2c0b3f 100644 --- a/clients/client-greengrassv2/src/GreengrassV2Client.ts +++ b/clients/client-greengrassv2/src/GreengrassV2Client.ts @@ -49,6 +49,10 @@ import { UserAgent as __UserAgent, } from "@aws-sdk/types"; +import { + AssociateServiceRoleToAccountCommandInput, + AssociateServiceRoleToAccountCommandOutput, +} from "./commands/AssociateServiceRoleToAccountCommand"; import { BatchAssociateClientDeviceWithCoreDeviceCommandInput, BatchAssociateClientDeviceWithCoreDeviceCommandOutput, @@ -66,13 +70,25 @@ import { CreateDeploymentCommandInput, CreateDeploymentCommandOutput } from "./c import { DeleteComponentCommandInput, DeleteComponentCommandOutput } from "./commands/DeleteComponentCommand"; import { DeleteCoreDeviceCommandInput, DeleteCoreDeviceCommandOutput } from "./commands/DeleteCoreDeviceCommand"; import { DescribeComponentCommandInput, DescribeComponentCommandOutput } from "./commands/DescribeComponentCommand"; +import { + DisassociateServiceRoleFromAccountCommandInput, + DisassociateServiceRoleFromAccountCommandOutput, +} from "./commands/DisassociateServiceRoleFromAccountCommand"; import { GetComponentCommandInput, GetComponentCommandOutput } from "./commands/GetComponentCommand"; import { GetComponentVersionArtifactCommandInput, GetComponentVersionArtifactCommandOutput, } from "./commands/GetComponentVersionArtifactCommand"; +import { + GetConnectivityInfoCommandInput, + GetConnectivityInfoCommandOutput, +} from "./commands/GetConnectivityInfoCommand"; import { GetCoreDeviceCommandInput, GetCoreDeviceCommandOutput } from "./commands/GetCoreDeviceCommand"; import { GetDeploymentCommandInput, GetDeploymentCommandOutput } from "./commands/GetDeploymentCommand"; +import { + GetServiceRoleForAccountCommandInput, + GetServiceRoleForAccountCommandOutput, +} from "./commands/GetServiceRoleForAccountCommand"; import { ListClientDevicesAssociatedWithCoreDeviceCommandInput, ListClientDevicesAssociatedWithCoreDeviceCommandOutput, @@ -102,9 +118,14 @@ import { } from "./commands/ResolveComponentCandidatesCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from 
"./commands/UntagResourceCommand"; +import { + UpdateConnectivityInfoCommandInput, + UpdateConnectivityInfoCommandOutput, +} from "./commands/UpdateConnectivityInfoCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = + | AssociateServiceRoleToAccountCommandInput | BatchAssociateClientDeviceWithCoreDeviceCommandInput | BatchDisassociateClientDeviceFromCoreDeviceCommandInput | CancelDeploymentCommandInput @@ -113,10 +134,13 @@ export type ServiceInputTypes = | DeleteComponentCommandInput | DeleteCoreDeviceCommandInput | DescribeComponentCommandInput + | DisassociateServiceRoleFromAccountCommandInput | GetComponentCommandInput | GetComponentVersionArtifactCommandInput + | GetConnectivityInfoCommandInput | GetCoreDeviceCommandInput | GetDeploymentCommandInput + | GetServiceRoleForAccountCommandInput | ListClientDevicesAssociatedWithCoreDeviceCommandInput | ListComponentVersionsCommandInput | ListComponentsCommandInput @@ -127,9 +151,11 @@ export type ServiceInputTypes = | ListTagsForResourceCommandInput | ResolveComponentCandidatesCommandInput | TagResourceCommandInput - | UntagResourceCommandInput; + | UntagResourceCommandInput + | UpdateConnectivityInfoCommandInput; export type ServiceOutputTypes = + | AssociateServiceRoleToAccountCommandOutput | BatchAssociateClientDeviceWithCoreDeviceCommandOutput | BatchDisassociateClientDeviceFromCoreDeviceCommandOutput | CancelDeploymentCommandOutput @@ -138,10 +164,13 @@ export type ServiceOutputTypes = | DeleteComponentCommandOutput | DeleteCoreDeviceCommandOutput | DescribeComponentCommandOutput + | DisassociateServiceRoleFromAccountCommandOutput | GetComponentCommandOutput | GetComponentVersionArtifactCommandOutput + | GetConnectivityInfoCommandOutput | GetCoreDeviceCommandOutput | GetDeploymentCommandOutput + | GetServiceRoleForAccountCommandOutput | ListClientDevicesAssociatedWithCoreDeviceCommandOutput | ListComponentVersionsCommandOutput | ListComponentsCommandOutput @@ -152,7 +181,8 @@ export type ServiceOutputTypes = | ListTagsForResourceCommandOutput | ResolveComponentCandidatesCommandOutput | TagResourceCommandOutput - | UntagResourceCommandOutput; + | UntagResourceCommandOutput + | UpdateConnectivityInfoCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** diff --git a/clients/client-greengrassv2/src/commands/AssociateServiceRoleToAccountCommand.ts b/clients/client-greengrassv2/src/commands/AssociateServiceRoleToAccountCommand.ts new file mode 100644 index 000000000000..09a619ccb09d --- /dev/null +++ b/clients/client-greengrassv2/src/commands/AssociateServiceRoleToAccountCommand.ts @@ -0,0 +1,104 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GreengrassV2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GreengrassV2Client"; +import { AssociateServiceRoleToAccountRequest, AssociateServiceRoleToAccountResponse } from "../models/models_0"; +import { + deserializeAws_restJson1AssociateServiceRoleToAccountCommand, + 
serializeAws_restJson1AssociateServiceRoleToAccountCommand, +} from "../protocols/Aws_restJson1"; + +export interface AssociateServiceRoleToAccountCommandInput extends AssociateServiceRoleToAccountRequest {} +export interface AssociateServiceRoleToAccountCommandOutput + extends AssociateServiceRoleToAccountResponse, + __MetadataBearer {} + +/** + *

            Associates a Greengrass service role with IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region. IoT Greengrass + * uses this role to verify the identity of client devices and manage core device connectivity + * information. The role must include the AWSGreengrassResourceAccessRolePolicy managed policy or a custom policy that + * defines equivalent permissions for the IoT Greengrass features that you use. For more information, see + * Greengrass service role in the IoT Greengrass Version 2 Developer Guide.

            + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { GreengrassV2Client, AssociateServiceRoleToAccountCommand } from "@aws-sdk/client-greengrassv2"; // ES Modules import + * // const { GreengrassV2Client, AssociateServiceRoleToAccountCommand } = require("@aws-sdk/client-greengrassv2"); // CommonJS import + * const client = new GreengrassV2Client(config); + * const command = new AssociateServiceRoleToAccountCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AssociateServiceRoleToAccountCommandInput} for command's `input` shape. + * @see {@link AssociateServiceRoleToAccountCommandOutput} for command's `response` shape. + * @see {@link GreengrassV2ClientResolvedConfig | config} for GreengrassV2Client's `config` shape. + * + */ +export class AssociateServiceRoleToAccountCommand extends $Command< + AssociateServiceRoleToAccountCommandInput, + AssociateServiceRoleToAccountCommandOutput, + GreengrassV2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AssociateServiceRoleToAccountCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: GreengrassV2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "GreengrassV2Client"; + const commandName = "AssociateServiceRoleToAccountCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AssociateServiceRoleToAccountRequest.filterSensitiveLog, + outputFilterSensitiveLog: AssociateServiceRoleToAccountResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: AssociateServiceRoleToAccountCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1AssociateServiceRoleToAccountCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1AssociateServiceRoleToAccountCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-greengrassv2/src/commands/BatchAssociateClientDeviceWithCoreDeviceCommand.ts b/clients/client-greengrassv2/src/commands/BatchAssociateClientDeviceWithCoreDeviceCommand.ts index 2004dece8f02..a3b77c062492 100644 --- a/clients/client-greengrassv2/src/commands/BatchAssociateClientDeviceWithCoreDeviceCommand.ts +++ b/clients/client-greengrassv2/src/commands/BatchAssociateClientDeviceWithCoreDeviceCommand.ts @@ -28,7 +28,7 @@ export interface BatchAssociateClientDeviceWithCoreDeviceCommandOutput __MetadataBearer {} /** - *

            Associate a list of client devices with a core device. Use this API operation to specify + *

            Associates a list of client devices with a core device. Use this API operation to specify * which client devices can discover a core device through cloud discovery. With cloud discovery, * client devices connect to IoT Greengrass to retrieve associated core devices' connectivity information * and certificates. For more information, see Configure cloud diff --git a/clients/client-greengrassv2/src/commands/BatchDisassociateClientDeviceFromCoreDeviceCommand.ts b/clients/client-greengrassv2/src/commands/BatchDisassociateClientDeviceFromCoreDeviceCommand.ts index dfffc5293287..889764cd4f7a 100644 --- a/clients/client-greengrassv2/src/commands/BatchDisassociateClientDeviceFromCoreDeviceCommand.ts +++ b/clients/client-greengrassv2/src/commands/BatchDisassociateClientDeviceFromCoreDeviceCommand.ts @@ -28,7 +28,7 @@ export interface BatchDisassociateClientDeviceFromCoreDeviceCommandOutput __MetadataBearer {} /** - *

            Disassociate a list of client devices from a core device. After you disassociate a client + *

            Disassociates a list of client devices from a core device. After you disassociate a client * device from a core device, the client device won't be able to use cloud discovery to retrieve * the core device's connectivity information and certificates.

            * @example diff --git a/clients/client-greengrassv2/src/commands/DisassociateServiceRoleFromAccountCommand.ts b/clients/client-greengrassv2/src/commands/DisassociateServiceRoleFromAccountCommand.ts new file mode 100644 index 000000000000..094c786e804d --- /dev/null +++ b/clients/client-greengrassv2/src/commands/DisassociateServiceRoleFromAccountCommand.ts @@ -0,0 +1,109 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GreengrassV2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GreengrassV2Client"; +import { + DisassociateServiceRoleFromAccountRequest, + DisassociateServiceRoleFromAccountResponse, +} from "../models/models_0"; +import { + deserializeAws_restJson1DisassociateServiceRoleFromAccountCommand, + serializeAws_restJson1DisassociateServiceRoleFromAccountCommand, +} from "../protocols/Aws_restJson1"; + +export interface DisassociateServiceRoleFromAccountCommandInput extends DisassociateServiceRoleFromAccountRequest {} +export interface DisassociateServiceRoleFromAccountCommandOutput + extends DisassociateServiceRoleFromAccountResponse, + __MetadataBearer {} + +/** + *

            Disassociates the Greengrass service role from IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region. + * Without a service role, IoT Greengrass can't verify the identity of client devices or manage core device + * connectivity information. For more information, see Greengrass service role in + * the IoT Greengrass Version 2 Developer Guide.

            + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { GreengrassV2Client, DisassociateServiceRoleFromAccountCommand } from "@aws-sdk/client-greengrassv2"; // ES Modules import + * // const { GreengrassV2Client, DisassociateServiceRoleFromAccountCommand } = require("@aws-sdk/client-greengrassv2"); // CommonJS import + * const client = new GreengrassV2Client(config); + * const command = new DisassociateServiceRoleFromAccountCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisassociateServiceRoleFromAccountCommandInput} for command's `input` shape. + * @see {@link DisassociateServiceRoleFromAccountCommandOutput} for command's `response` shape. + * @see {@link GreengrassV2ClientResolvedConfig | config} for GreengrassV2Client's `config` shape. + * + */ +export class DisassociateServiceRoleFromAccountCommand extends $Command< + DisassociateServiceRoleFromAccountCommandInput, + DisassociateServiceRoleFromAccountCommandOutput, + GreengrassV2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisassociateServiceRoleFromAccountCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: GreengrassV2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "GreengrassV2Client"; + const commandName = "DisassociateServiceRoleFromAccountCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisassociateServiceRoleFromAccountRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisassociateServiceRoleFromAccountResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DisassociateServiceRoleFromAccountCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1DisassociateServiceRoleFromAccountCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1DisassociateServiceRoleFromAccountCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts b/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts new file mode 100644 index 000000000000..5ea4e90b976c --- /dev/null +++ b/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, 
+ SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GreengrassV2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GreengrassV2Client"; +import { GetConnectivityInfoRequest, GetConnectivityInfoResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetConnectivityInfoCommand, + serializeAws_restJson1GetConnectivityInfoCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetConnectivityInfoCommandInput extends GetConnectivityInfoRequest {} +export interface GetConnectivityInfoCommandOutput extends GetConnectivityInfoResponse, __MetadataBearer {} + +/** + *

            Retrieves connectivity information for a Greengrass core device.

            + *

            Connectivity information includes endpoints and ports where client devices + * can connect to an MQTT broker on the core device. When a client device + * calls the Greengrass discovery API, + * IoT Greengrass returns connectivity information for all of the core devices where the client device can + * connect. For more information, see Connect client devices to + * core devices in the IoT Greengrass Version 2 Developer Guide.

            + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { GreengrassV2Client, GetConnectivityInfoCommand } from "@aws-sdk/client-greengrassv2"; // ES Modules import + * // const { GreengrassV2Client, GetConnectivityInfoCommand } = require("@aws-sdk/client-greengrassv2"); // CommonJS import + * const client = new GreengrassV2Client(config); + * const command = new GetConnectivityInfoCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetConnectivityInfoCommandInput} for command's `input` shape. + * @see {@link GetConnectivityInfoCommandOutput} for command's `response` shape. + * @see {@link GreengrassV2ClientResolvedConfig | config} for GreengrassV2Client's `config` shape. + * + */ +export class GetConnectivityInfoCommand extends $Command< + GetConnectivityInfoCommandInput, + GetConnectivityInfoCommandOutput, + GreengrassV2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetConnectivityInfoCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: GreengrassV2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "GreengrassV2Client"; + const commandName = "GetConnectivityInfoCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetConnectivityInfoRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetConnectivityInfoResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetConnectivityInfoCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetConnectivityInfoCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetConnectivityInfoCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-greengrassv2/src/commands/GetServiceRoleForAccountCommand.ts b/clients/client-greengrassv2/src/commands/GetServiceRoleForAccountCommand.ts new file mode 100644 index 000000000000..d48f023bebeb --- /dev/null +++ b/clients/client-greengrassv2/src/commands/GetServiceRoleForAccountCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GreengrassV2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GreengrassV2Client"; +import { GetServiceRoleForAccountRequest, 
GetServiceRoleForAccountResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetServiceRoleForAccountCommand, + serializeAws_restJson1GetServiceRoleForAccountCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetServiceRoleForAccountCommandInput extends GetServiceRoleForAccountRequest {} +export interface GetServiceRoleForAccountCommandOutput extends GetServiceRoleForAccountResponse, __MetadataBearer {} + +/** + *

            Gets the service role associated with IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region. + * IoT Greengrass uses this role to verify the identity of client devices and manage core device + * connectivity information. For more information, see Greengrass service role in + * the IoT Greengrass Version 2 Developer Guide.

            + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { GreengrassV2Client, GetServiceRoleForAccountCommand } from "@aws-sdk/client-greengrassv2"; // ES Modules import + * // const { GreengrassV2Client, GetServiceRoleForAccountCommand } = require("@aws-sdk/client-greengrassv2"); // CommonJS import + * const client = new GreengrassV2Client(config); + * const command = new GetServiceRoleForAccountCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetServiceRoleForAccountCommandInput} for command's `input` shape. + * @see {@link GetServiceRoleForAccountCommandOutput} for command's `response` shape. + * @see {@link GreengrassV2ClientResolvedConfig | config} for GreengrassV2Client's `config` shape. + * + */ +export class GetServiceRoleForAccountCommand extends $Command< + GetServiceRoleForAccountCommandInput, + GetServiceRoleForAccountCommandOutput, + GreengrassV2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetServiceRoleForAccountCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: GreengrassV2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "GreengrassV2Client"; + const commandName = "GetServiceRoleForAccountCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetServiceRoleForAccountRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetServiceRoleForAccountResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetServiceRoleForAccountCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetServiceRoleForAccountCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetServiceRoleForAccountCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts b/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts new file mode 100644 index 000000000000..3bd6c669d47f --- /dev/null +++ b/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GreengrassV2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from 
"../GreengrassV2Client"; +import { UpdateConnectivityInfoRequest, UpdateConnectivityInfoResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateConnectivityInfoCommand, + serializeAws_restJson1UpdateConnectivityInfoCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateConnectivityInfoCommandInput extends UpdateConnectivityInfoRequest {} +export interface UpdateConnectivityInfoCommandOutput extends UpdateConnectivityInfoResponse, __MetadataBearer {} + +/** + *

            Updates connectivity information for a Greengrass core device.

            + *

            Connectivity information includes endpoints and ports where client devices + * can connect to an MQTT broker on the core device. When a client device + * calls the Greengrass discovery API, + * IoT Greengrass returns connectivity information for all of the core devices where the client device can + * connect. For more information, see Connect client devices to + * core devices in the IoT Greengrass Version 2 Developer Guide.

            + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { GreengrassV2Client, UpdateConnectivityInfoCommand } from "@aws-sdk/client-greengrassv2"; // ES Modules import + * // const { GreengrassV2Client, UpdateConnectivityInfoCommand } = require("@aws-sdk/client-greengrassv2"); // CommonJS import + * const client = new GreengrassV2Client(config); + * const command = new UpdateConnectivityInfoCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateConnectivityInfoCommandInput} for command's `input` shape. + * @see {@link UpdateConnectivityInfoCommandOutput} for command's `response` shape. + * @see {@link GreengrassV2ClientResolvedConfig | config} for GreengrassV2Client's `config` shape. + * + */ +export class UpdateConnectivityInfoCommand extends $Command< + UpdateConnectivityInfoCommandInput, + UpdateConnectivityInfoCommandOutput, + GreengrassV2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateConnectivityInfoCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: GreengrassV2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "GreengrassV2Client"; + const commandName = "UpdateConnectivityInfoCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateConnectivityInfoRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateConnectivityInfoResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateConnectivityInfoCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateConnectivityInfoCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateConnectivityInfoCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-greengrassv2/src/commands/index.ts b/clients/client-greengrassv2/src/commands/index.ts index d40be118c33a..3fab8373c874 100644 --- a/clients/client-greengrassv2/src/commands/index.ts +++ b/clients/client-greengrassv2/src/commands/index.ts @@ -1,3 +1,4 @@ +export * from "./AssociateServiceRoleToAccountCommand"; export * from "./BatchAssociateClientDeviceWithCoreDeviceCommand"; export * from "./BatchDisassociateClientDeviceFromCoreDeviceCommand"; export * from "./CancelDeploymentCommand"; @@ -6,10 +7,13 @@ export * from "./CreateDeploymentCommand"; export * from "./DeleteComponentCommand"; export * from "./DeleteCoreDeviceCommand"; export * from "./DescribeComponentCommand"; +export * from "./DisassociateServiceRoleFromAccountCommand"; export * from "./GetComponentCommand"; export * from "./GetComponentVersionArtifactCommand"; +export * from "./GetConnectivityInfoCommand"; export * 
from "./GetCoreDeviceCommand"; export * from "./GetDeploymentCommand"; +export * from "./GetServiceRoleForAccountCommand"; export * from "./ListClientDevicesAssociatedWithCoreDeviceCommand"; export * from "./ListComponentVersionsCommand"; export * from "./ListComponentsCommand"; @@ -21,3 +25,4 @@ export * from "./ListTagsForResourceCommand"; export * from "./ResolveComponentCandidatesCommand"; export * from "./TagResourceCommand"; export * from "./UntagResourceCommand"; +export * from "./UpdateConnectivityInfoCommand"; diff --git a/clients/client-greengrassv2/src/models/models_0.ts b/clients/client-greengrassv2/src/models/models_0.ts index 07985f581ccb..03a983ca819f 100644 --- a/clients/client-greengrassv2/src/models/models_0.ts +++ b/clients/client-greengrassv2/src/models/models_0.ts @@ -94,40 +94,36 @@ export namespace AssociatedClientDevice { }); } -export interface BatchAssociateClientDeviceWithCoreDeviceRequest { +export interface AssociateServiceRoleToAccountRequest { /** - *

            The list of client devices to associate.

            + *

            The Amazon Resource Name (ARN) of the service role to associate with IoT Greengrass for your + * Amazon Web Services account in this Amazon Web Services Region.

            */ - entries?: AssociateClientDeviceWithCoreDeviceEntry[]; - - /** - *

            The name of the core device. This is also the name of the IoT thing.

            - */ - coreDeviceThingName: string | undefined; + roleArn: string | undefined; } -export namespace BatchAssociateClientDeviceWithCoreDeviceRequest { +export namespace AssociateServiceRoleToAccountRequest { /** * @internal */ - export const filterSensitiveLog = (obj: BatchAssociateClientDeviceWithCoreDeviceRequest): any => ({ + export const filterSensitiveLog = (obj: AssociateServiceRoleToAccountRequest): any => ({ ...obj, }); } -export interface BatchAssociateClientDeviceWithCoreDeviceResponse { +export interface AssociateServiceRoleToAccountResponse { /** - *

            The list of any errors for the entries in the request. Each error entry contains the name - * of the IoT thing that failed to associate.

            + *

            The time when the service role was associated with IoT Greengrass for your Amazon Web Services account in this + * Amazon Web Services Region.

            */ - errorEntries?: AssociateClientDeviceWithCoreDeviceErrorEntry[]; + associatedAt?: string; } -export namespace BatchAssociateClientDeviceWithCoreDeviceResponse { +export namespace AssociateServiceRoleToAccountResponse { /** * @internal */ - export const filterSensitiveLog = (obj: BatchAssociateClientDeviceWithCoreDeviceResponse): any => ({ + export const filterSensitiveLog = (obj: AssociateServiceRoleToAccountResponse): any => ({ ...obj, }); } @@ -155,120 +151,158 @@ export namespace InternalServerException { } /** - *

            The requested resource can't be found.

            + *

            Contains information about a validation exception field.

            */ -export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { - name: "ResourceNotFoundException"; - $fault: "client"; - message: string | undefined; +export interface ValidationExceptionField { /** - *

            The ID of the resource that isn't found.

            + *

            The name of the exception field.

            */ - resourceId: string | undefined; + name: string | undefined; /** - *

            The type of the resource that isn't found.

            + *

            The message of the exception field.

            */ - resourceType: string | undefined; + message: string | undefined; } -export namespace ResourceNotFoundException { +export namespace ValidationExceptionField { /** * @internal */ - export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ + export const filterSensitiveLog = (obj: ValidationExceptionField): any => ({ ...obj, }); } +export enum ValidationExceptionReason { + CANNOT_PARSE = "CANNOT_PARSE", + FIELD_VALIDATION_FAILED = "FIELD_VALIDATION_FAILED", + OTHER = "OTHER", + UNKNOWN_OPERATION = "UNKNOWN_OPERATION", +} + /** - *

            Your request exceeded a request rate quota. For example, you might have exceeded the - * amount of times that you can retrieve device or deployment status per second.

            + *

            The request isn't valid. This can occur if your request contains malformed JSON or + * unsupported characters.
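+ * A sketch of inspecting this error (illustrative only, not generated documentation); the region
+ * and thing name are placeholder assumptions:
+ * ```javascript
+ * import { GreengrassV2Client, GetConnectivityInfoCommand } from "@aws-sdk/client-greengrassv2";
+ *
+ * const client = new GreengrassV2Client({ region: "REGION" });
+ * try {
+ *   await client.send(new GetConnectivityInfoCommand({ thingName: "MyGreengrassCore" }));
+ * } catch (error) {
+ *   if (error.name === "ValidationException") {
+ *     // reason is one of CANNOT_PARSE, FIELD_VALIDATION_FAILED, UNKNOWN_OPERATION, or OTHER;
+ *     // fields lists the { name, message } entries that failed to validate.
+ *     console.error(error.reason, error.fields);
+ *   }
+ * }
+ * ```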

            */ -export interface ThrottlingException extends __SmithyException, $MetadataBearer { - name: "ThrottlingException"; +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; $fault: "client"; message: string | undefined; /** - *

            The code for the quota in Service Quotas.

            + *

            The reason for the validation exception.

            */ - quotaCode?: string; + reason?: ValidationExceptionReason | string; /** - *

            The code for the service in Service Quotas.

            + *

            The list of fields that failed to validate.

            */ - serviceCode?: string; + fields?: ValidationExceptionField[]; +} +export namespace ValidationException { /** - *

            The amount of time to wait before you retry the request.

            + * @internal */ - retryAfterSeconds?: number; + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); } -export namespace ThrottlingException { +export interface BatchAssociateClientDeviceWithCoreDeviceRequest { + /** + *

            The list of client devices to associate.

            + */ + entries?: AssociateClientDeviceWithCoreDeviceEntry[]; + + /** + *

            The name of the core device. This is also the name of the IoT thing.

            + */ + coreDeviceThingName: string | undefined; +} + +export namespace BatchAssociateClientDeviceWithCoreDeviceRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ThrottlingException): any => ({ + export const filterSensitiveLog = (obj: BatchAssociateClientDeviceWithCoreDeviceRequest): any => ({ + ...obj, + }); +} + +export interface BatchAssociateClientDeviceWithCoreDeviceResponse { + /** + *

            The list of any errors for the entries in the request. Each error entry contains the name + * of the IoT thing that failed to associate.

            + */ + errorEntries?: AssociateClientDeviceWithCoreDeviceErrorEntry[]; +} + +export namespace BatchAssociateClientDeviceWithCoreDeviceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchAssociateClientDeviceWithCoreDeviceResponse): any => ({ ...obj, }); } /** - *

            Contains information about a validation exception field.

            + *

            The requested resource can't be found.

            */ -export interface ValidationExceptionField { +export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { + name: "ResourceNotFoundException"; + $fault: "client"; + message: string | undefined; /** - *

            The name of the exception field.

            + *

            The ID of the resource that isn't found.

            */ - name: string | undefined; + resourceId: string | undefined; /** - *

            The message of the exception field.

            + *

            The type of the resource that isn't found.

            */ - message: string | undefined; + resourceType: string | undefined; } -export namespace ValidationExceptionField { +export namespace ResourceNotFoundException { /** * @internal */ - export const filterSensitiveLog = (obj: ValidationExceptionField): any => ({ + export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ ...obj, }); } -export enum ValidationExceptionReason { - CANNOT_PARSE = "CANNOT_PARSE", - FIELD_VALIDATION_FAILED = "FIELD_VALIDATION_FAILED", - OTHER = "OTHER", - UNKNOWN_OPERATION = "UNKNOWN_OPERATION", -} - /** - *

            The request isn't valid. This can occur if your request contains malformed JSON or - * unsupported characters.

            + *

            Your request exceeded a request rate quota. For example, you might have exceeded the + * amount of times that you can retrieve device or deployment status per second.
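+ * A sketch of a simple wait-and-retry on this error (illustrative only, not generated
+ * documentation), assuming `client` and `command` were constructed as shown in the package README:
+ * ```javascript
+ * try {
+ *   await client.send(command);
+ * } catch (error) {
+ *   if (error.name === "ThrottlingException" && error.retryAfterSeconds) {
+ *     // Wait for the suggested interval, then retry the request once.
+ *     await new Promise((resolve) => setTimeout(resolve, error.retryAfterSeconds * 1000));
+ *     await client.send(command);
+ *   }
+ * }
+ * ```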

            */ -export interface ValidationException extends __SmithyException, $MetadataBearer { - name: "ValidationException"; +export interface ThrottlingException extends __SmithyException, $MetadataBearer { + name: "ThrottlingException"; $fault: "client"; message: string | undefined; /** - *

            The reason for the validation exception.

            + *

            The code for the quota in Service Quotas.

            */ - reason?: ValidationExceptionReason | string; + quotaCode?: string; /** - *

            The list of fields that failed to validate.

            + *

            The code for the service in Service Quotas.

            */ - fields?: ValidationExceptionField[]; + serviceCode?: string; + + /** + *

            The amount of time to wait before you retry the request.

            + */ + retryAfterSeconds?: number; } -export namespace ValidationException { +export namespace ThrottlingException { /** * @internal */ - export const filterSensitiveLog = (obj: ValidationException): any => ({ + export const filterSensitiveLog = (obj: ThrottlingException): any => ({ ...obj, }); } @@ -347,8 +381,8 @@ export namespace DisassociateClientDeviceFromCoreDeviceErrorEntry { export interface BatchDisassociateClientDeviceFromCoreDeviceResponse { /** - *

            The list of errors (if any) for the entries in the request. Each error entry contains the - * name of the IoT thing that failed to disassociate.

            + *

            The list of any errors for the entries in the request. Each error entry contains the name + * of the IoT thing that failed to disassociate.

            */ errorEntries?: DisassociateClientDeviceFromCoreDeviceErrorEntry[]; } @@ -825,6 +859,43 @@ export enum ComponentVisibilityScope { PUBLIC = "PUBLIC", } +/** + *

            Contains information about an endpoint and port where client devices can connect to an + * MQTT broker on a Greengrass core device.
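+ * An illustrative value (the ID and host address are placeholder assumptions):
+ * ```javascript
+ * { id: "coreDeviceEndpoint", hostAddress: "greengrass-core.example.com", portNumber: 8883, metadata: "" }
+ * ```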

            + */ +export interface ConnectivityInfo { + /** + *

            An ID for the connectivity information.

            + */ + id?: string; + + /** + *

            The IP address or DNS address where client devices can connect to an MQTT broker on the + * Greengrass core device.

            + */ + hostAddress?: string; + + /** + *

            The port where the MQTT broker operates on the core device. This port is typically 8883, + * which is the default port for the MQTT broker component that runs on core devices.

            + */ + portNumber?: number; + + /** + *

            Additional metadata to provide to client devices that connect to this core device.

            + */ + metadata?: string; +} + +export namespace ConnectivityInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConnectivityInfo): any => ({ + ...obj, + }); +} + export enum CoreDeviceStatus { HEALTHY = "HEALTHY", UNHEALTHY = "UNHEALTHY", @@ -841,7 +912,8 @@ export interface CoreDevice { coreDeviceThingName?: string; /** - *

            The status of the core device. Core devices can have the following statuses:

            + *

            The status of the core device. Core devices can have the following + * statuses:

            *
              *
            • *

              @@ -1940,6 +2012,34 @@ export namespace DescribeComponentResponse { }); } +export interface DisassociateServiceRoleFromAccountRequest {} + +export namespace DisassociateServiceRoleFromAccountRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateServiceRoleFromAccountRequest): any => ({ + ...obj, + }); +} + +export interface DisassociateServiceRoleFromAccountResponse { + /** + *

              The time when the service role was disassociated from IoT Greengrass for your Amazon Web Services account in this + * Amazon Web Services Region.

              + */ + disassociatedAt?: string; +} + +export namespace DisassociateServiceRoleFromAccountResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateServiceRoleFromAccountResponse): any => ({ + ...obj, + }); +} + export enum EffectiveDeploymentExecutionStatus { CANCELED = "CANCELED", COMPLETED = "COMPLETED", @@ -2111,6 +2211,43 @@ export namespace GetComponentVersionArtifactResponse { }); } +export interface GetConnectivityInfoRequest { + /** + *

              The name of the core device. This is also the name of the IoT thing.

              + */ + thingName: string | undefined; +} + +export namespace GetConnectivityInfoRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetConnectivityInfoRequest): any => ({ + ...obj, + }); +} + +export interface GetConnectivityInfoResponse { + /** + *

              The connectivity information for the core device.

              + */ + connectivityInfo?: ConnectivityInfo[]; + + /** + *

              A message about the connectivity information request.

              + */ + message?: string; +} + +export namespace GetConnectivityInfoResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetConnectivityInfoResponse): any => ({ + ...obj, + }); +} + export interface GetCoreDeviceRequest { /** *

              The name of the core device. This is also the name of the IoT thing.

              @@ -2287,6 +2424,40 @@ export namespace GetDeploymentResponse { }); } +export interface GetServiceRoleForAccountRequest {} + +export namespace GetServiceRoleForAccountRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetServiceRoleForAccountRequest): any => ({ + ...obj, + }); +} + +export interface GetServiceRoleForAccountResponse { + /** + *

              The time when the service role was associated with IoT Greengrass for your Amazon Web Services account in this + * Amazon Web Services Region.

              + */ + associatedAt?: string; + + /** + *

              The ARN of the service role that is associated with IoT Greengrass for your Amazon Web Services account in this + * Amazon Web Services Region.

              + */ + roleArn?: string; +} + +export namespace GetServiceRoleForAccountResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetServiceRoleForAccountResponse): any => ({ + ...obj, + }); +} + export interface ListClientDevicesAssociatedWithCoreDeviceRequest { /** *

              The name of the core device. This is also the name of the IoT thing.

              @@ -2876,3 +3047,45 @@ export namespace UntagResourceResponse { ...obj, }); } + +export interface UpdateConnectivityInfoRequest { + /** + *

              The name of the core device. This is also the name of the IoT thing.

              + */ + thingName: string | undefined; + + /** + *

              The connectivity information for the core device.

              + */ + connectivityInfo: ConnectivityInfo[] | undefined; +} + +export namespace UpdateConnectivityInfoRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateConnectivityInfoRequest): any => ({ + ...obj, + }); +} + +export interface UpdateConnectivityInfoResponse { + /** + *

              The new version of the connectivity information for the core device.

              + */ + version?: string; + + /** + *

              A message about the connectivity information update request.

              + */ + message?: string; +} + +export namespace UpdateConnectivityInfoResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateConnectivityInfoResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-greengrassv2/src/protocols/Aws_restJson1.ts b/clients/client-greengrassv2/src/protocols/Aws_restJson1.ts index d62bb4d3855e..ccd7a036c02a 100644 --- a/clients/client-greengrassv2/src/protocols/Aws_restJson1.ts +++ b/clients/client-greengrassv2/src/protocols/Aws_restJson1.ts @@ -22,6 +22,10 @@ import { } from "@aws-sdk/types"; import { v4 as generateIdempotencyToken } from "uuid"; +import { + AssociateServiceRoleToAccountCommandInput, + AssociateServiceRoleToAccountCommandOutput, +} from "../commands/AssociateServiceRoleToAccountCommand"; import { BatchAssociateClientDeviceWithCoreDeviceCommandInput, BatchAssociateClientDeviceWithCoreDeviceCommandOutput, @@ -39,13 +43,25 @@ import { CreateDeploymentCommandInput, CreateDeploymentCommandOutput } from "../ import { DeleteComponentCommandInput, DeleteComponentCommandOutput } from "../commands/DeleteComponentCommand"; import { DeleteCoreDeviceCommandInput, DeleteCoreDeviceCommandOutput } from "../commands/DeleteCoreDeviceCommand"; import { DescribeComponentCommandInput, DescribeComponentCommandOutput } from "../commands/DescribeComponentCommand"; +import { + DisassociateServiceRoleFromAccountCommandInput, + DisassociateServiceRoleFromAccountCommandOutput, +} from "../commands/DisassociateServiceRoleFromAccountCommand"; import { GetComponentCommandInput, GetComponentCommandOutput } from "../commands/GetComponentCommand"; import { GetComponentVersionArtifactCommandInput, GetComponentVersionArtifactCommandOutput, } from "../commands/GetComponentVersionArtifactCommand"; +import { + GetConnectivityInfoCommandInput, + GetConnectivityInfoCommandOutput, +} from "../commands/GetConnectivityInfoCommand"; import { GetCoreDeviceCommandInput, GetCoreDeviceCommandOutput } from "../commands/GetCoreDeviceCommand"; import { GetDeploymentCommandInput, GetDeploymentCommandOutput } from "../commands/GetDeploymentCommand"; +import { + GetServiceRoleForAccountCommandInput, + GetServiceRoleForAccountCommandOutput, +} from "../commands/GetServiceRoleForAccountCommand"; import { ListClientDevicesAssociatedWithCoreDeviceCommandInput, ListClientDevicesAssociatedWithCoreDeviceCommandOutput, @@ -75,6 +91,10 @@ import { } from "../commands/ResolveComponentCandidatesCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { + UpdateConnectivityInfoCommandInput, + UpdateConnectivityInfoCommandOutput, +} from "../commands/UpdateConnectivityInfoCommand"; import { AccessDeniedException, AssociateClientDeviceWithCoreDeviceEntry, @@ -91,6 +111,7 @@ import { ComponentRunWith, ComponentVersionListItem, ConflictException, + ConnectivityInfo, CoreDevice, Deployment, DeploymentComponentUpdatePolicy, @@ -125,6 +146,31 @@ import { ValidationExceptionField, } from "../models/models_0"; +export const serializeAws_restJson1AssociateServiceRoleToAccountCommand = async ( + input: AssociateServiceRoleToAccountCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + 
`${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/greengrass/servicerole"; + let body: any; + body = JSON.stringify({ + ...(input.roleArn !== undefined && input.roleArn !== null && { RoleArn: input.roleArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1BatchAssociateClientDeviceWithCoreDeviceCommand = async ( input: BatchAssociateClientDeviceWithCoreDeviceCommandInput, context: __SerdeContext @@ -392,6 +438,29 @@ export const serializeAws_restJson1DescribeComponentCommand = async ( }); }; +export const serializeAws_restJson1DisassociateServiceRoleFromAccountCommand = async ( + input: DisassociateServiceRoleFromAccountCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/greengrass/servicerole"; + let body: any; + body = ""; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetComponentCommand = async ( input: GetComponentCommandInput, context: __SerdeContext @@ -470,6 +539,36 @@ export const serializeAws_restJson1GetComponentVersionArtifactCommand = async ( }); }; +export const serializeAws_restJson1GetConnectivityInfoCommand = async ( + input: GetConnectivityInfoCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/greengrass/things/{thingName}/connectivityInfo"; + if (input.thingName !== undefined) { + const labelValue: string = input.thingName; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: thingName."); + } + resolvedPath = resolvedPath.replace("{thingName}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: thingName."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetCoreDeviceCommand = async ( input: GetCoreDeviceCommandInput, context: __SerdeContext @@ -529,6 +628,29 @@ export const serializeAws_restJson1GetDeploymentCommand = async ( }); }; +export const serializeAws_restJson1GetServiceRoleForAccountCommand = async ( + input: GetServiceRoleForAccountCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/greengrass/servicerole"; + let body: any; + body = ""; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1ListClientDevicesAssociatedWithCoreDeviceCommand = async ( input: ListClientDevicesAssociatedWithCoreDeviceCommandInput, context: __SerdeContext @@ -871,6 +993,107 @@ export const serializeAws_restJson1UntagResourceCommand = async ( }); }; +export const serializeAws_restJson1UpdateConnectivityInfoCommand = async ( + input: UpdateConnectivityInfoCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/greengrass/things/{thingName}/connectivityInfo"; + if (input.thingName !== undefined) { + const labelValue: string = input.thingName; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: thingName."); + } + resolvedPath = resolvedPath.replace("{thingName}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: thingName."); + } + let body: any; + body = JSON.stringify({ + ...(input.connectivityInfo !== undefined && + input.connectivityInfo !== null && { + ConnectivityInfo: serializeAws_restJson1connectivityInfoList(input.connectivityInfo, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + +export const deserializeAws_restJson1AssociateServiceRoleToAccountCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1AssociateServiceRoleToAccountCommandError(output, context); + } + const contents: AssociateServiceRoleToAccountCommandOutput = { + $metadata: deserializeMetadata(output), + associatedAt: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.AssociatedAt !== undefined && data.AssociatedAt !== null) { + contents.associatedAt = __expectString(data.AssociatedAt); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1AssociateServiceRoleToAccountCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.greengrassv2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.greengrassv2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = 
parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1BatchAssociateClientDeviceWithCoreDeviceCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1669,6 +1892,61 @@ const deserializeAws_restJson1DescribeComponentCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1DisassociateServiceRoleFromAccountCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisassociateServiceRoleFromAccountCommandError(output, context); + } + const contents: DisassociateServiceRoleFromAccountCommandOutput = { + $metadata: deserializeMetadata(output), + disassociatedAt: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.DisassociatedAt !== undefined && data.DisassociatedAt !== null) { + contents.disassociatedAt = __expectString(data.DisassociatedAt); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisassociateServiceRoleFromAccountCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.greengrassv2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1GetComponentCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1851,6 +2129,73 @@ const deserializeAws_restJson1GetComponentVersionArtifactCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1GetConnectivityInfoCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetConnectivityInfoCommandError(output, context); + } + const contents: GetConnectivityInfoCommandOutput = { + $metadata: deserializeMetadata(output), + connectivityInfo: undefined, + message: undefined, + }; + const data: { [key: string]: 
any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.ConnectivityInfo !== undefined && data.ConnectivityInfo !== null) { + contents.connectivityInfo = deserializeAws_restJson1connectivityInfoList(data.ConnectivityInfo, context); + } + if (data.Message !== undefined && data.Message !== null) { + contents.message = __expectString(data.Message); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetConnectivityInfoCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.greengrassv2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.greengrassv2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1GetCoreDeviceCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2102,6 +2447,65 @@ const deserializeAws_restJson1GetDeploymentCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1GetServiceRoleForAccountCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetServiceRoleForAccountCommandError(output, context); + } + const contents: GetServiceRoleForAccountCommandOutput = { + $metadata: deserializeMetadata(output), + associatedAt: undefined, + roleArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.AssociatedAt !== undefined && data.AssociatedAt !== null) { + contents.associatedAt = __expectString(data.AssociatedAt); + } + if (data.RoleArn !== undefined && data.RoleArn !== null) { + contents.roleArn = __expectString(data.RoleArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetServiceRoleForAccountCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case 
"InternalServerException": + case "com.amazonaws.greengrassv2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1ListClientDevicesAssociatedWithCoreDeviceCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -3024,6 +3428,73 @@ const deserializeAws_restJson1UntagResourceCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1UpdateConnectivityInfoCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateConnectivityInfoCommandError(output, context); + } + const contents: UpdateConnectivityInfoCommandOutput = { + $metadata: deserializeMetadata(output), + message: undefined, + version: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Message !== undefined && data.Message !== null) { + contents.message = __expectString(data.Message); + } + if (data.Version !== undefined && data.Version !== null) { + contents.version = __expectString(data.Version); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateConnectivityInfoCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.greengrassv2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.greengrassv2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ 
-3389,6 +3860,26 @@ const serializeAws_restJson1ComponentVersionRequirementMap = ( }, {}); }; +const serializeAws_restJson1ConnectivityInfo = (input: ConnectivityInfo, context: __SerdeContext): any => { + return { + ...(input.hostAddress !== undefined && input.hostAddress !== null && { HostAddress: input.hostAddress }), + ...(input.id !== undefined && input.id !== null && { Id: input.id }), + ...(input.metadata !== undefined && input.metadata !== null && { Metadata: input.metadata }), + ...(input.portNumber !== undefined && input.portNumber !== null && { PortNumber: input.portNumber }), + }; +}; + +const serializeAws_restJson1connectivityInfoList = (input: ConnectivityInfo[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1ConnectivityInfo(entry, context); + }); +}; + const serializeAws_restJson1DeploymentComponentUpdatePolicy = ( input: DeploymentComponentUpdatePolicy, context: __SerdeContext @@ -3982,6 +4473,26 @@ const deserializeAws_restJson1ComponentVersionListItem = ( } as any; }; +const deserializeAws_restJson1ConnectivityInfo = (output: any, context: __SerdeContext): ConnectivityInfo => { + return { + hostAddress: __expectString(output.HostAddress), + id: __expectString(output.Id), + metadata: __expectString(output.Metadata), + portNumber: __expectInt32(output.PortNumber), + } as any; +}; + +const deserializeAws_restJson1connectivityInfoList = (output: any, context: __SerdeContext): ConnectivityInfo[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ConnectivityInfo(entry, context); + }); +}; + const deserializeAws_restJson1CoreDevice = (output: any, context: __SerdeContext): CoreDevice => { return { coreDeviceThingName: __expectString(output.coreDeviceThingName), diff --git a/clients/client-iot-wireless/README.md b/clients/client-iot-wireless/README.md index bba55f8626e6..8de297c38301 100644 --- a/clients/client-iot-wireless/README.md +++ b/clients/client-iot-wireless/README.md @@ -7,7 +7,17 @@ AWS SDK for JavaScript IoTWireless Client for Node.js, Browser and React Native. -

              AWS IoT Wireless API documentation

              +

              AWS IoT Wireless provides bi-directional communication between internet-connected wireless +devices and the AWS Cloud. To onboard both LoRaWAN and Sidewalk devices to AWS IoT, use the +IoT Wireless API. These wireless devices use the Low Power Wide Area Networking (LPWAN) +communication protocol to communicate with AWS IoT.

              +

              Using the API, you can perform create, read, update, and delete operations for your wireless +devices, gateways, destinations, and profiles. After onboarding your devices, you +can use the API operations to set log levels and monitor your devices with CloudWatch.

              +

              You can also use the API operations to create multicast groups and schedule a multicast session for +sending a downlink message to devices in the group. By using Firmware Updates Over-The-Air +(FUOTA) API operations, you can create a FUOTA task and schedule a session to update the firmware +of individual devices or an entire group of devices in a multicast group.

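As an illustration of the queued-message operations added in this change, the sketch below lists a device's downlink queue with the new `ListQueuedMessagesCommand`. It is not part of the generated diff: the region and device ID are placeholder values, and the rest assumes standard AWS SDK for JavaScript v3 usage as shown in the command documentation.

```javascript
import { IoTWirelessClient, ListQueuedMessagesCommand } from "@aws-sdk/client-iot-wireless";

// Placeholder region and device ID -- substitute your own values.
const client = new IoTWirelessClient({ region: "us-east-1" });

const listQueued = async () => {
  // Retrieve up to 10 downlink messages currently queued for one wireless device.
  const { DownlinkQueueMessagesList, NextToken } = await client.send(
    new ListQueuedMessagesCommand({
      Id: "wireless-device-id",
      MaxResults: 10,
    })
  );
  console.log(DownlinkQueueMessagesList, NextToken);
};

listQueued();
```

`DeleteQueuedMessagesCommand` follows the same pattern: it takes the device `Id` plus a `MessageId`, or `"*"` as the message ID to purge the entire queue for that device.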
              ## Installing diff --git a/clients/client-iot-wireless/src/IoTWireless.ts b/clients/client-iot-wireless/src/IoTWireless.ts index 417cb8ebbcc3..3652babe954d 100644 --- a/clients/client-iot-wireless/src/IoTWireless.ts +++ b/clients/client-iot-wireless/src/IoTWireless.ts @@ -105,6 +105,11 @@ import { DeleteMulticastGroupCommandInput, DeleteMulticastGroupCommandOutput, } from "./commands/DeleteMulticastGroupCommand"; +import { + DeleteQueuedMessagesCommand, + DeleteQueuedMessagesCommandInput, + DeleteQueuedMessagesCommandOutput, +} from "./commands/DeleteQueuedMessagesCommand"; import { DeleteServiceProfileCommand, DeleteServiceProfileCommandInput, @@ -295,6 +300,11 @@ import { ListPartnerAccountsCommandInput, ListPartnerAccountsCommandOutput, } from "./commands/ListPartnerAccountsCommand"; +import { + ListQueuedMessagesCommand, + ListQueuedMessagesCommandInput, + ListQueuedMessagesCommandOutput, +} from "./commands/ListQueuedMessagesCommand"; import { ListServiceProfilesCommand, ListServiceProfilesCommandInput, @@ -424,7 +434,17 @@ import { import { IoTWirelessClient } from "./IoTWirelessClient"; /** - *

              AWS IoT Wireless API documentation

              + *

              AWS IoT Wireless provides bi-directional communication between internet-connected wireless + * devices and the AWS Cloud. To onboard both LoRaWAN and Sidewalk devices to AWS IoT, use the + * IoT Wireless API. These wireless devices use the Low Power Wide Area Networking (LPWAN) + * communication protocol to communicate with AWS IoT.

              + *

              Using the API, you can perform create, read, update, and delete operations for your wireless + * devices, gateways, destinations, and profiles. After onboarding your devices, you + * can use the API operations to set log levels and monitor your devices with CloudWatch.

              + *

              You can also use the API operations to create multicast groups and schedule a multicast session for + * sending a downlink message to devices in the group. By using Firmware Updates Over-The-Air + * (FUOTA) API operations, you can create a FUOTA task and schedule a session to update the firmware + * of individual devices or an entire group of devices in a multicast group.

              */ export class IoTWireless extends IoTWirelessClient { /** @@ -1105,6 +1125,38 @@ export class IoTWireless extends IoTWirelessClient { } } + /** + *

              Deletes queued messages from the downlink queue of a wireless device.

              + */ + public deleteQueuedMessages( + args: DeleteQueuedMessagesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteQueuedMessages( + args: DeleteQueuedMessagesCommandInput, + cb: (err: any, data?: DeleteQueuedMessagesCommandOutput) => void + ): void; + public deleteQueuedMessages( + args: DeleteQueuedMessagesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteQueuedMessagesCommandOutput) => void + ): void; + public deleteQueuedMessages( + args: DeleteQueuedMessagesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteQueuedMessagesCommandOutput) => void), + cb?: (err: any, data?: DeleteQueuedMessagesCommandOutput) => void + ): Promise | void { + const command = new DeleteQueuedMessagesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

              Deletes a service profile.

              */ @@ -2332,6 +2384,38 @@ export class IoTWireless extends IoTWirelessClient { } } + /** + *

              Lists the messages currently queued in the downlink queue of a wireless device.

              + */ + public listQueuedMessages( + args: ListQueuedMessagesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listQueuedMessages( + args: ListQueuedMessagesCommandInput, + cb: (err: any, data?: ListQueuedMessagesCommandOutput) => void + ): void; + public listQueuedMessages( + args: ListQueuedMessagesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListQueuedMessagesCommandOutput) => void + ): void; + public listQueuedMessages( + args: ListQueuedMessagesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListQueuedMessagesCommandOutput) => void), + cb?: (err: any, data?: ListQueuedMessagesCommandOutput) => void + ): Promise | void { + const command = new ListQueuedMessagesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

              Lists the service profiles registered to your AWS account.

              */ diff --git a/clients/client-iot-wireless/src/IoTWirelessClient.ts b/clients/client-iot-wireless/src/IoTWirelessClient.ts index 2b558b2ca6b6..eaf4b40b3953 100644 --- a/clients/client-iot-wireless/src/IoTWirelessClient.ts +++ b/clients/client-iot-wireless/src/IoTWirelessClient.ts @@ -121,6 +121,10 @@ import { DeleteMulticastGroupCommandInput, DeleteMulticastGroupCommandOutput, } from "./commands/DeleteMulticastGroupCommand"; +import { + DeleteQueuedMessagesCommandInput, + DeleteQueuedMessagesCommandOutput, +} from "./commands/DeleteQueuedMessagesCommand"; import { DeleteServiceProfileCommandInput, DeleteServiceProfileCommandOutput, @@ -237,6 +241,7 @@ import { ListPartnerAccountsCommandInput, ListPartnerAccountsCommandOutput, } from "./commands/ListPartnerAccountsCommand"; +import { ListQueuedMessagesCommandInput, ListQueuedMessagesCommandOutput } from "./commands/ListQueuedMessagesCommand"; import { ListServiceProfilesCommandInput, ListServiceProfilesCommandOutput, @@ -347,6 +352,7 @@ export type ServiceInputTypes = | DeleteDeviceProfileCommandInput | DeleteFuotaTaskCommandInput | DeleteMulticastGroupCommandInput + | DeleteQueuedMessagesCommandInput | DeleteServiceProfileCommandInput | DeleteWirelessDeviceCommandInput | DeleteWirelessGatewayCommandInput @@ -385,6 +391,7 @@ export type ServiceInputTypes = | ListMulticastGroupsByFuotaTaskCommandInput | ListMulticastGroupsCommandInput | ListPartnerAccountsCommandInput + | ListQueuedMessagesCommandInput | ListServiceProfilesCommandInput | ListTagsForResourceCommandInput | ListWirelessDevicesCommandInput @@ -434,6 +441,7 @@ export type ServiceOutputTypes = | DeleteDeviceProfileCommandOutput | DeleteFuotaTaskCommandOutput | DeleteMulticastGroupCommandOutput + | DeleteQueuedMessagesCommandOutput | DeleteServiceProfileCommandOutput | DeleteWirelessDeviceCommandOutput | DeleteWirelessGatewayCommandOutput @@ -472,6 +480,7 @@ export type ServiceOutputTypes = | ListMulticastGroupsByFuotaTaskCommandOutput | ListMulticastGroupsCommandOutput | ListPartnerAccountsCommandOutput + | ListQueuedMessagesCommandOutput | ListServiceProfilesCommandOutput | ListTagsForResourceCommandOutput | ListWirelessDevicesCommandOutput @@ -648,7 +657,17 @@ type IoTWirelessClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpH export interface IoTWirelessClientResolvedConfig extends IoTWirelessClientResolvedConfigType {} /** - *

              AWS IoT Wireless API documentation

              + *

              AWS IoT Wireless provides bi-directional communication between internet-connected wireless + * devices and the AWS Cloud. To onboard both LoRaWAN and Sidewalk devices to AWS IoT, use the + * IoT Wireless API. These wireless devices use the Low Power Wide Area Networking (LPWAN) + * communication protocol to communicate with AWS IoT.

              + *

              Using the API, you can perform create, read, update, and delete operations for your wireless + * devices, gateways, destinations, and profiles. After onboarding your devices, you + * can use the API operations to set log levels and monitor your devices with CloudWatch.

              + *

              You can also use the API operations to create multicast groups and schedule a multicast session for + * sending a downlink message to devices in the group. By using Firmware Updates Over-The-Air + * (FUOTA) API operations, you can create a FUOTA task and schedule a session to update the firmware + * of individual devices or an entire group of devices in a multicast group.

              */ export class IoTWirelessClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-iot-wireless/src/commands/DeleteQueuedMessagesCommand.ts b/clients/client-iot-wireless/src/commands/DeleteQueuedMessagesCommand.ts new file mode 100644 index 000000000000..31b16d21c41c --- /dev/null +++ b/clients/client-iot-wireless/src/commands/DeleteQueuedMessagesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTWirelessClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTWirelessClient"; +import { DeleteQueuedMessagesRequest, DeleteQueuedMessagesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteQueuedMessagesCommand, + serializeAws_restJson1DeleteQueuedMessagesCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteQueuedMessagesCommandInput extends DeleteQueuedMessagesRequest {} +export interface DeleteQueuedMessagesCommandOutput extends DeleteQueuedMessagesResponse, __MetadataBearer {} + +/** + *

              Deletes queued messages from the downlink queue of a wireless device.

              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTWirelessClient, DeleteQueuedMessagesCommand } from "@aws-sdk/client-iot-wireless"; // ES Modules import + * // const { IoTWirelessClient, DeleteQueuedMessagesCommand } = require("@aws-sdk/client-iot-wireless"); // CommonJS import + * const client = new IoTWirelessClient(config); + * const command = new DeleteQueuedMessagesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteQueuedMessagesCommandInput} for command's `input` shape. + * @see {@link DeleteQueuedMessagesCommandOutput} for command's `response` shape. + * @see {@link IoTWirelessClientResolvedConfig | config} for IoTWirelessClient's `config` shape. + * + */ +export class DeleteQueuedMessagesCommand extends $Command< + DeleteQueuedMessagesCommandInput, + DeleteQueuedMessagesCommandOutput, + IoTWirelessClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteQueuedMessagesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTWirelessClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTWirelessClient"; + const commandName = "DeleteQueuedMessagesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteQueuedMessagesRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteQueuedMessagesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteQueuedMessagesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteQueuedMessagesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteQueuedMessagesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iot-wireless/src/commands/ListQueuedMessagesCommand.ts b/clients/client-iot-wireless/src/commands/ListQueuedMessagesCommand.ts new file mode 100644 index 000000000000..c7b47e587b8a --- /dev/null +++ b/clients/client-iot-wireless/src/commands/ListQueuedMessagesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTWirelessClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTWirelessClient"; +import { ListQueuedMessagesRequest, 
ListQueuedMessagesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListQueuedMessagesCommand, + serializeAws_restJson1ListQueuedMessagesCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListQueuedMessagesCommandInput extends ListQueuedMessagesRequest {} +export interface ListQueuedMessagesCommandOutput extends ListQueuedMessagesResponse, __MetadataBearer {} + +/** + *

              Lists the messages currently queued in the downlink queue of a wireless device.

              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTWirelessClient, ListQueuedMessagesCommand } from "@aws-sdk/client-iot-wireless"; // ES Modules import + * // const { IoTWirelessClient, ListQueuedMessagesCommand } = require("@aws-sdk/client-iot-wireless"); // CommonJS import + * const client = new IoTWirelessClient(config); + * const command = new ListQueuedMessagesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListQueuedMessagesCommandInput} for command's `input` shape. + * @see {@link ListQueuedMessagesCommandOutput} for command's `response` shape. + * @see {@link IoTWirelessClientResolvedConfig | config} for IoTWirelessClient's `config` shape. + * + */ +export class ListQueuedMessagesCommand extends $Command< + ListQueuedMessagesCommandInput, + ListQueuedMessagesCommandOutput, + IoTWirelessClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListQueuedMessagesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTWirelessClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTWirelessClient"; + const commandName = "ListQueuedMessagesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListQueuedMessagesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListQueuedMessagesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListQueuedMessagesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListQueuedMessagesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListQueuedMessagesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iot-wireless/src/commands/index.ts b/clients/client-iot-wireless/src/commands/index.ts index 9ceb0a0e233a..c7d08a92b2f5 100644 --- a/clients/client-iot-wireless/src/commands/index.ts +++ b/clients/client-iot-wireless/src/commands/index.ts @@ -19,6 +19,7 @@ export * from "./DeleteDestinationCommand"; export * from "./DeleteDeviceProfileCommand"; export * from "./DeleteFuotaTaskCommand"; export * from "./DeleteMulticastGroupCommand"; +export * from "./DeleteQueuedMessagesCommand"; export * from "./DeleteServiceProfileCommand"; export * from "./DeleteWirelessDeviceCommand"; export * from "./DeleteWirelessGatewayCommand"; @@ -57,6 +58,7 @@ export * from "./ListFuotaTasksCommand"; export * from "./ListMulticastGroupsByFuotaTaskCommand"; export * from "./ListMulticastGroupsCommand"; export * from "./ListPartnerAccountsCommand"; +export * from "./ListQueuedMessagesCommand"; export * from "./ListServiceProfilesCommand"; export * from 
"./ListTagsForResourceCommand"; export * from "./ListWirelessDevicesCommand"; diff --git a/clients/client-iot-wireless/src/models/models_0.ts b/clients/client-iot-wireless/src/models/models_0.ts index f7a290dd95b3..202d0cd7ee48 100644 --- a/clients/client-iot-wireless/src/models/models_0.ts +++ b/clients/client-iot-wireless/src/models/models_0.ts @@ -1697,6 +1697,43 @@ export namespace DeleteMulticastGroupResponse { }); } +export interface DeleteQueuedMessagesRequest { + /** + *

              The ID of the wireless device whose queued messages will be deleted.

              + */ + Id: string | undefined; + + /** + *

              If messageId is "*", the entire downlink queue for the specified wireless device is purged; otherwise, only the message with the specified messageId is deleted.

              + */ + MessageId: string | undefined; + + /** + *

              The wireless device type, which is either Sidewalk or LoRaWAN.

              + */ + WirelessDeviceType?: WirelessDeviceType | string; +} + +export namespace DeleteQueuedMessagesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteQueuedMessagesRequest): any => ({ + ...obj, + }); +} + +export interface DeleteQueuedMessagesResponse {} + +export namespace DeleteQueuedMessagesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteQueuedMessagesResponse): any => ({ + ...obj, + }); +} + export interface DeleteServiceProfileRequest { /** *

              The ID of the resource to delete.

              @@ -2170,6 +2207,59 @@ export namespace DisassociateWirelessGatewayFromThingResponse { }); } +/** + *

              LoRaWAN router info.

              + */ +export interface LoRaWANSendDataToDevice { + /** + *

              The Fport value.

              + */ + FPort?: number; +} + +export namespace LoRaWANSendDataToDevice { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LoRaWANSendDataToDevice): any => ({ + ...obj, + }); +} + +/** + *

              A message in the downlink queue.

              + */ +export interface DownlinkQueueMessage { + /** + *

              The message ID assigned by IoT Wireless for tracing purposes.

              + */ + MessageId?: string; + + /** + *

              The transmit mode to use to send data to the wireless device: 0 for UM (unacknowledged mode) or 1 for AM (acknowledged mode).

              + */ + TransmitMode?: number; + + /** + *

              The time at which IoT Wireless received the message.

              + */ + ReceivedAt?: string; + + /** + *

              LoRaWAN router info.

              + */ + LoRaWAN?: LoRaWANSendDataToDevice; +} + +export namespace DownlinkQueueMessage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DownlinkQueueMessage): any => ({ + ...obj, + }); +} + export enum Event { ACK = "ack", DISCOVERED = "discovered", @@ -4107,6 +4197,58 @@ export namespace ListPartnerAccountsResponse { }); } +export interface ListQueuedMessagesRequest { + /** + *

              The ID of the wireless device that the downlink packets are targeted to.

              + */ + Id: string | undefined; + + /** + *

              To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

              + */ + NextToken?: string; + + /** + *

              The maximum number of results to return in this operation.

              + */ + MaxResults?: number; + + /** + *

              The wireless device type, which is either Sidewalk or LoRaWAN.

              + */ + WirelessDeviceType?: WirelessDeviceType | string; +} + +export namespace ListQueuedMessagesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListQueuedMessagesRequest): any => ({ + ...obj, + }); +} + +export interface ListQueuedMessagesResponse { + /** + *

              To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

              + */ + NextToken?: string; + + /** + *

              The messages in the downlink queue.

              + */ + DownlinkQueueMessagesList?: DownlinkQueueMessage[]; +} + +export namespace ListQueuedMessagesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListQueuedMessagesResponse): any => ({ + ...obj, + }); +} + export interface ListServiceProfilesRequest { /** *

              To retrieve the next set of results, the nextToken value from a previous response; otherwise null to receive the first set of results.

              @@ -4767,25 +4909,6 @@ export namespace SendDataToMulticastGroupResponse { }); } -/** - *

              LoRaWAN router info.

              - */ -export interface LoRaWANSendDataToDevice { - /** - *

              The Fport value.

              - */ - FPort?: number; -} - -export namespace LoRaWANSendDataToDevice { - /** - * @internal - */ - export const filterSensitiveLog = (obj: LoRaWANSendDataToDevice): any => ({ - ...obj, - }); -} - export enum MessageType { CUSTOM_COMMAND_ID_GET = "CUSTOM_COMMAND_ID_GET", CUSTOM_COMMAND_ID_NOTIFY = "CUSTOM_COMMAND_ID_NOTIFY", diff --git a/clients/client-iot-wireless/src/pagination/ListQueuedMessagesPaginator.ts b/clients/client-iot-wireless/src/pagination/ListQueuedMessagesPaginator.ts new file mode 100644 index 000000000000..3c15c862f18e --- /dev/null +++ b/clients/client-iot-wireless/src/pagination/ListQueuedMessagesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListQueuedMessagesCommand, + ListQueuedMessagesCommandInput, + ListQueuedMessagesCommandOutput, +} from "../commands/ListQueuedMessagesCommand"; +import { IoTWireless } from "../IoTWireless"; +import { IoTWirelessClient } from "../IoTWirelessClient"; +import { IoTWirelessPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: IoTWirelessClient, + input: ListQueuedMessagesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListQueuedMessagesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: IoTWireless, + input: ListQueuedMessagesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listQueuedMessages(input, ...args); +}; +export async function* paginateListQueuedMessages( + config: IoTWirelessPaginationConfiguration, + input: ListQueuedMessagesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListQueuedMessagesCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof IoTWireless) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof IoTWirelessClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected IoTWireless | IoTWirelessClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-iot-wireless/src/pagination/index.ts b/clients/client-iot-wireless/src/pagination/index.ts index b17bc8aec266..b64a20a60d01 100644 --- a/clients/client-iot-wireless/src/pagination/index.ts +++ b/clients/client-iot-wireless/src/pagination/index.ts @@ -4,6 +4,7 @@ export * from "./ListDeviceProfilesPaginator"; export * from "./ListFuotaTasksPaginator"; export * from "./ListMulticastGroupsByFuotaTaskPaginator"; export * from "./ListMulticastGroupsPaginator"; +export * from "./ListQueuedMessagesPaginator"; export * from "./ListServiceProfilesPaginator"; export * from "./ListWirelessDevicesPaginator"; export * from "./ListWirelessGatewaysPaginator"; diff --git a/clients/client-iot-wireless/src/protocols/Aws_restJson1.ts b/clients/client-iot-wireless/src/protocols/Aws_restJson1.ts index 65272c9ca90c..e431f5aecde1 100644 --- a/clients/client-iot-wireless/src/protocols/Aws_restJson1.ts +++ b/clients/client-iot-wireless/src/protocols/Aws_restJson1.ts @@ -93,6 +93,10 @@ import { DeleteMulticastGroupCommandInput, 
DeleteMulticastGroupCommandOutput, } from "../commands/DeleteMulticastGroupCommand"; +import { + DeleteQueuedMessagesCommandInput, + DeleteQueuedMessagesCommandOutput, +} from "../commands/DeleteQueuedMessagesCommand"; import { DeleteServiceProfileCommandInput, DeleteServiceProfileCommandOutput, @@ -209,6 +213,7 @@ import { ListPartnerAccountsCommandInput, ListPartnerAccountsCommandOutput, } from "../commands/ListPartnerAccountsCommand"; +import { ListQueuedMessagesCommandInput, ListQueuedMessagesCommandOutput } from "../commands/ListQueuedMessagesCommand"; import { ListServiceProfilesCommandInput, ListServiceProfilesCommandOutput, @@ -304,6 +309,7 @@ import { Destinations, DeviceProfile, DeviceRegistrationStateEventConfiguration, + DownlinkQueueMessage, FPorts, FuotaTask, InternalServerException, @@ -1027,6 +1033,40 @@ export const serializeAws_restJson1DeleteMulticastGroupCommand = async ( }); }; +export const serializeAws_restJson1DeleteQueuedMessagesCommand = async ( + input: DeleteQueuedMessagesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/wireless-devices/{Id}/data"; + if (input.Id !== undefined) { + const labelValue: string = input.Id; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Id."); + } + resolvedPath = resolvedPath.replace("{Id}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Id."); + } + const query: any = { + ...(input.MessageId !== undefined && { messageId: input.MessageId }), + ...(input.WirelessDeviceType !== undefined && { WirelessDeviceType: input.WirelessDeviceType }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1DeleteServiceProfileCommand = async ( input: DeleteServiceProfileCommandInput, context: __SerdeContext @@ -2147,6 +2187,41 @@ export const serializeAws_restJson1ListPartnerAccountsCommand = async ( }); }; +export const serializeAws_restJson1ListQueuedMessagesCommand = async ( + input: ListQueuedMessagesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/wireless-devices/{Id}/data"; + if (input.Id !== undefined) { + const labelValue: string = input.Id; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Id."); + } + resolvedPath = resolvedPath.replace("{Id}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Id."); + } + const query: any = { + ...(input.NextToken !== undefined && { nextToken: input.NextToken }), + ...(input.MaxResults !== undefined && { maxResults: input.MaxResults.toString() }), + ...(input.WirelessDeviceType !== undefined && { WirelessDeviceType: input.WirelessDeviceType }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1ListServiceProfilesCommand = async ( input: ListServiceProfilesCommandInput, context: __SerdeContext @@ -5000,6 +5075,89 @@ const deserializeAws_restJson1DeleteMulticastGroupCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1DeleteQueuedMessagesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 204 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteQueuedMessagesCommandError(output, context); + } + const contents: DeleteQueuedMessagesCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteQueuedMessagesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iotwireless#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iotwireless#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iotwireless#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iotwireless#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iotwireless#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || 
errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1DeleteServiceProfileCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -8506,6 +8664,100 @@ const deserializeAws_restJson1ListPartnerAccountsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1ListQueuedMessagesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListQueuedMessagesCommandError(output, context); + } + const contents: ListQueuedMessagesCommandOutput = { + $metadata: deserializeMetadata(output), + DownlinkQueueMessagesList: undefined, + NextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.DownlinkQueueMessagesList !== undefined && data.DownlinkQueueMessagesList !== null) { + contents.DownlinkQueueMessagesList = deserializeAws_restJson1DownlinkQueueMessagesList( + data.DownlinkQueueMessagesList, + context + ); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListQueuedMessagesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iotwireless#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iotwireless#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iotwireless#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iotwireless#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iotwireless#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = 
parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1ListServiceProfilesCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -11481,6 +11733,32 @@ const deserializeAws_restJson1DeviceRegistrationStateEventConfiguration = ( } as any; }; +const deserializeAws_restJson1DownlinkQueueMessage = (output: any, context: __SerdeContext): DownlinkQueueMessage => { + return { + LoRaWAN: + output.LoRaWAN !== undefined && output.LoRaWAN !== null + ? deserializeAws_restJson1LoRaWANSendDataToDevice(output.LoRaWAN, context) + : undefined, + MessageId: __expectString(output.MessageId), + ReceivedAt: __expectString(output.ReceivedAt), + TransmitMode: __expectInt32(output.TransmitMode), + } as any; +}; + +const deserializeAws_restJson1DownlinkQueueMessagesList = ( + output: any, + context: __SerdeContext +): DownlinkQueueMessage[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1DownlinkQueueMessage(entry, context); + }); +}; + const deserializeAws_restJson1FactoryPresetFreqsList = (output: any, context: __SerdeContext): number[] => { return (output || []) .filter((e: any) => e != null) @@ -11744,6 +12022,15 @@ const deserializeAws_restJson1LoRaWANMulticastSession = ( } as any; }; +const deserializeAws_restJson1LoRaWANSendDataToDevice = ( + output: any, + context: __SerdeContext +): LoRaWANSendDataToDevice => { + return { + FPort: __expectInt32(output.FPort), + } as any; +}; + const deserializeAws_restJson1LoRaWANUpdateGatewayTaskCreate = ( output: any, context: __SerdeContext diff --git a/clients/client-iot/src/commands/DeleteScheduledAuditCommand.ts b/clients/client-iot/src/commands/DeleteScheduledAuditCommand.ts index 5dea5f2735fd..c3af6ca5e9d0 100644 --- a/clients/client-iot/src/commands/DeleteScheduledAuditCommand.ts +++ b/clients/client-iot/src/commands/DeleteScheduledAuditCommand.ts @@ -12,7 +12,7 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { DeleteScheduledAuditRequest, DeleteScheduledAuditResponse } from "../models/models_0"; +import { DeleteScheduledAuditRequest, DeleteScheduledAuditResponse } from "../models/models_1"; import { deserializeAws_restJson1DeleteScheduledAuditCommand, serializeAws_restJson1DeleteScheduledAuditCommand, diff --git a/clients/client-iot/src/commands/DeleteSecurityProfileCommand.ts b/clients/client-iot/src/commands/DeleteSecurityProfileCommand.ts index ec327a526bd0..f85f82fdf22f 100644 --- a/clients/client-iot/src/commands/DeleteSecurityProfileCommand.ts +++ b/clients/client-iot/src/commands/DeleteSecurityProfileCommand.ts @@ -12,8 +12,7 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { DeleteSecurityProfileRequest } from "../models/models_0"; -import { DeleteSecurityProfileResponse } from "../models/models_1"; +import { DeleteSecurityProfileRequest, DeleteSecurityProfileResponse } from "../models/models_1"; import { 
deserializeAws_restJson1DeleteSecurityProfileCommand, serializeAws_restJson1DeleteSecurityProfileCommand, diff --git a/clients/client-iot/src/commands/ListThingRegistrationTaskReportsCommand.ts b/clients/client-iot/src/commands/ListThingRegistrationTaskReportsCommand.ts index 317e1ca2d54d..e3b5959bb1a7 100644 --- a/clients/client-iot/src/commands/ListThingRegistrationTaskReportsCommand.ts +++ b/clients/client-iot/src/commands/ListThingRegistrationTaskReportsCommand.ts @@ -12,7 +12,7 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { ListThingRegistrationTaskReportsRequest, ListThingRegistrationTaskReportsResponse } from "../models/models_1"; +import { ListThingRegistrationTaskReportsRequest, ListThingRegistrationTaskReportsResponse } from "../models/models_2"; import { deserializeAws_restJson1ListThingRegistrationTaskReportsCommand, serializeAws_restJson1ListThingRegistrationTaskReportsCommand, diff --git a/clients/client-iot/src/commands/ListThingRegistrationTasksCommand.ts b/clients/client-iot/src/commands/ListThingRegistrationTasksCommand.ts index 6afba4c209c4..002db93521ff 100644 --- a/clients/client-iot/src/commands/ListThingRegistrationTasksCommand.ts +++ b/clients/client-iot/src/commands/ListThingRegistrationTasksCommand.ts @@ -12,8 +12,7 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { ListThingRegistrationTasksRequest } from "../models/models_1"; -import { ListThingRegistrationTasksResponse } from "../models/models_2"; +import { ListThingRegistrationTasksRequest, ListThingRegistrationTasksResponse } from "../models/models_2"; import { deserializeAws_restJson1ListThingRegistrationTasksCommand, serializeAws_restJson1ListThingRegistrationTasksCommand, diff --git a/clients/client-iot/src/models/models_0.ts b/clients/client-iot/src/models/models_0.ts index fb3f2ceb06f5..7d36c72022e1 100644 --- a/clients/client-iot/src/models/models_0.ts +++ b/clients/client-iot/src/models/models_0.ts @@ -4537,6 +4537,58 @@ export namespace InvalidAggregationException { }); } +export enum RetryableFailureType { + ALL = "ALL", + FAILED = "FAILED", + TIMED_OUT = "TIMED_OUT", +} + +/** + *

              The criteria that determines how many retries are allowed for each failure + * type for a job.

              + */ +export interface RetryCriteria { + /** + *

              The type of job execution failures that can initiate a job retry.

              + */ + failureType: RetryableFailureType | string | undefined; + + /** + *

              The number of retries allowed for a failure type for the job.

              + */ + numberOfRetries: number | undefined; +} + +export namespace RetryCriteria { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RetryCriteria): any => ({ + ...obj, + }); +} + +/** + *

              The configuration that determines how many retries are allowed for each failure + * type for a job.

              + */ +export interface JobExecutionsRetryConfig { + /** + *

              The list of criteria that determines how many retries are allowed for each failure + * type for a job.

              + */ + criteriaList: RetryCriteria[] | undefined; +} + +export namespace JobExecutionsRetryConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: JobExecutionsRetryConfig): any => ({ + ...obj, + }); +} + /** *

              Allows you to define a criteria to initiate the increase in rate of rollout for a job.

              */ @@ -4729,7 +4781,7 @@ export interface CreateJobRequest { jobExecutionsRolloutConfig?: JobExecutionsRolloutConfig; /** - *

              Allows you to create criteria to abort a job.

              + *

              Allows you to create the criteria to abort a job.

              */ abortConfig?: AbortConfig; @@ -4764,6 +4816,11 @@ export interface CreateJobRequest { */ jobTemplateArn?: string; + /** + *

              Allows you to create the criteria to retry a job.

              + */ + jobExecutionsRetryConfig?: JobExecutionsRetryConfig; + /** *

              Parameters of a managed template that you can specify to create the job document.

              */ @@ -4885,6 +4942,11 @@ export interface CreateJobTemplateRequest { *

              Metadata that can be used to manage the job template.

              */ tags?: Tag[]; + + /** + *

              Allows you to create the criteria to retry a job.

              + */ + jobExecutionsRetryConfig?: JobExecutionsRetryConfig; } export namespace CreateJobTemplateRequest { @@ -7844,53 +7906,3 @@ export namespace DeleteRoleAliasResponse { ...obj, }); } - -export interface DeleteScheduledAuditRequest { - /** - *

              The name of the scheduled audit you want to delete.

              - */ - scheduledAuditName: string | undefined; -} - -export namespace DeleteScheduledAuditRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DeleteScheduledAuditRequest): any => ({ - ...obj, - }); -} - -export interface DeleteScheduledAuditResponse {} - -export namespace DeleteScheduledAuditResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DeleteScheduledAuditResponse): any => ({ - ...obj, - }); -} - -export interface DeleteSecurityProfileRequest { - /** - *

              The name of the security profile to be deleted.

              - */ - securityProfileName: string | undefined; - - /** - *

              The expected version of the security profile. A new version is generated whenever - * the security profile is updated. If you specify a value that is different from the actual - * version, a VersionConflictException is thrown.

              - */ - expectedVersion?: number; -} - -export namespace DeleteSecurityProfileRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DeleteSecurityProfileRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-iot/src/models/models_1.ts b/clients/client-iot/src/models/models_1.ts index 4d02e978bba5..5913acd67409 100644 --- a/clients/client-iot/src/models/models_1.ts +++ b/clients/client-iot/src/models/models_1.ts @@ -33,6 +33,7 @@ import { DayOfWeek, DimensionType, FleetMetricUnit, + JobExecutionsRetryConfig, JobExecutionsRolloutConfig, LogLevel, MetricToRetain, @@ -56,6 +57,56 @@ import { VerificationState, } from "./models_0"; +export interface DeleteScheduledAuditRequest { + /** + *

              The name of the scheduled audit you want to delete.

              + */ + scheduledAuditName: string | undefined; +} + +export namespace DeleteScheduledAuditRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteScheduledAuditRequest): any => ({ + ...obj, + }); +} + +export interface DeleteScheduledAuditResponse {} + +export namespace DeleteScheduledAuditResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteScheduledAuditResponse): any => ({ + ...obj, + }); +} + +export interface DeleteSecurityProfileRequest { + /** + *

              The name of the security profile to be deleted.

              + */ + securityProfileName: string | undefined; + + /** + *

              The expected version of the security profile. A new version is generated whenever + * the security profile is updated. If you specify a value that is different from the actual + * version, a VersionConflictException is thrown.

              + */ + expectedVersion?: number; +} + +export namespace DeleteSecurityProfileRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteSecurityProfileRequest): any => ({ + ...obj, + }); +} + export interface DeleteSecurityProfileResponse {} export namespace DeleteSecurityProfileResponse { @@ -2114,6 +2165,11 @@ export interface Job { */ jobTemplateArn?: string; + /** + *

              The configuration for the criteria to retry the job.

              + */ + jobExecutionsRetryConfig?: JobExecutionsRetryConfig; + /** *

              A key-value map that pairs the patterns that need to be replaced in a managed * template job document schema. You can use the description of each key as a guidance @@ -2371,6 +2427,12 @@ export interface DescribeJobTemplateResponse { * be automatically set to TIMED_OUT.

              */ timeoutConfig?: TimeoutConfig; + + /** + *

              The configuration that determines how many retries are allowed for each failure type + * for a job.

              + */ + jobExecutionsRetryConfig?: JobExecutionsRetryConfig; } export namespace DescribeJobTemplateResponse { @@ -6370,6 +6432,12 @@ export interface JobExecutionSummary { * information.

              */ executionNumber?: number; + + /** + *

              The number that indicates how many retry attempts have been completed for this + * job on this device.

              + */ + retryAttempt?: number; } export namespace JobExecutionSummary { @@ -6460,6 +6528,11 @@ export interface ListJobExecutionsForThingRequest { *

              The token to retrieve the next set of results.

              */ nextToken?: string; + + /** + *

              The unique identifier you assigned to this job when it was created.

              + */ + jobId?: string; } export namespace ListJobExecutionsForThingRequest { @@ -8224,90 +8297,3 @@ export enum ReportType { ERRORS = "ERRORS", RESULTS = "RESULTS", } - -export interface ListThingRegistrationTaskReportsRequest { - /** - *

              The id of the task.

              - */ - taskId: string | undefined; - - /** - *

              The type of task report.

              - */ - reportType: ReportType | string | undefined; - - /** - *

              To retrieve the next set of results, the nextToken - * value from a previous response; otherwise null to receive - * the first set of results.

              - */ - nextToken?: string; - - /** - *

              The maximum number of results to return per request.

              - */ - maxResults?: number; -} - -export namespace ListThingRegistrationTaskReportsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListThingRegistrationTaskReportsRequest): any => ({ - ...obj, - }); -} - -export interface ListThingRegistrationTaskReportsResponse { - /** - *

              Links to the task resources.

              - */ - resourceLinks?: string[]; - - /** - *

              The type of task report.

              - */ - reportType?: ReportType | string; - - /** - *

              The token to use to get the next set of results, or null if there are no additional results.

              - */ - nextToken?: string; -} - -export namespace ListThingRegistrationTaskReportsResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListThingRegistrationTaskReportsResponse): any => ({ - ...obj, - }); -} - -export interface ListThingRegistrationTasksRequest { - /** - *

              To retrieve the next set of results, the nextToken - * value from a previous response; otherwise null to receive - * the first set of results.

              - */ - nextToken?: string; - - /** - *

              The maximum number of results to return at one time.

              - */ - maxResults?: number; - - /** - *

              The status of the bulk thing provisioning task.

              - */ - status?: Status | string; -} - -export namespace ListThingRegistrationTasksRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListThingRegistrationTasksRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-iot/src/models/models_2.ts b/clients/client-iot/src/models/models_2.ts index 4da8315ebfa7..f1b329703687 100644 --- a/clients/client-iot/src/models/models_2.ts +++ b/clients/client-iot/src/models/models_2.ts @@ -20,6 +20,7 @@ import { DayOfWeek, DimensionType, FleetMetricUnit, + JobExecutionsRetryConfig, JobExecutionsRolloutConfig, LogLevel, MetricToRetain, @@ -47,12 +48,101 @@ import { DomainConfigurationStatus, LogTargetType, RegistrationConfig, + ReportType, + Status, ThingGroupIndexingConfiguration, ThingIndexingConfiguration, ThingTypeMetadata, ViolationEventOccurrenceRange, } from "./models_1"; +export interface ListThingRegistrationTaskReportsRequest { + /** + *

              The id of the task.

              + */ + taskId: string | undefined; + + /** + *

              The type of task report.

              + */ + reportType: ReportType | string | undefined; + + /** + *

              To retrieve the next set of results, the nextToken + * value from a previous response; otherwise null to receive + * the first set of results.

              + */ + nextToken?: string; + + /** + *

              The maximum number of results to return per request.

              + */ + maxResults?: number; +} + +export namespace ListThingRegistrationTaskReportsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListThingRegistrationTaskReportsRequest): any => ({ + ...obj, + }); +} + +export interface ListThingRegistrationTaskReportsResponse { + /** + *

              Links to the task resources.

              + */ + resourceLinks?: string[]; + + /** + *

              The type of task report.

              + */ + reportType?: ReportType | string; + + /** + *

              The token to use to get the next set of results, or null if there are no additional results.

              + */ + nextToken?: string; +} + +export namespace ListThingRegistrationTaskReportsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListThingRegistrationTaskReportsResponse): any => ({ + ...obj, + }); +} + +export interface ListThingRegistrationTasksRequest { + /** + *

              To retrieve the next set of results, the nextToken + * value from a previous response; otherwise null to receive + * the first set of results.

              + */ + nextToken?: string; + + /** + *

              The maximum number of results to return at one time.

              + */ + maxResults?: number; + + /** + *

              The status of the bulk thing provisioning task.

              + */ + status?: Status | string; +} + +export namespace ListThingRegistrationTasksRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListThingRegistrationTasksRequest): any => ({ + ...obj, + }); +} + export interface ListThingRegistrationTasksResponse { /** *

              A list of bulk thing provisioning task IDs.

              @@ -3059,6 +3149,11 @@ export interface UpdateJobRequest { * */ namespaceId?: string; + + /** + *

              Allows you to create the criteria to retry a job.

              + */ + jobExecutionsRetryConfig?: JobExecutionsRetryConfig; } export namespace UpdateJobRequest { diff --git a/clients/client-iot/src/protocols/Aws_restJson1.ts b/clients/client-iot/src/protocols/Aws_restJson1.ts index 408b5edbd98d..71b4852c71d6 100644 --- a/clients/client-iot/src/protocols/Aws_restJson1.ts +++ b/clients/client-iot/src/protocols/Aws_restJson1.ts @@ -744,6 +744,7 @@ import { IotAnalyticsAction, IotEventsAction, IotSiteWiseAction, + JobExecutionsRetryConfig, JobExecutionsRolloutConfig, KafkaAction, KeyPair, @@ -774,6 +775,7 @@ import { ResourceAlreadyExistsException, ResourceIdentifier, ResourceNotFoundException, + RetryCriteria, S3Action, S3Destination, S3Location, @@ -1825,6 +1827,13 @@ export const serializeAws_restJson1CreateJobCommand = async ( }), ...(input.documentSource !== undefined && input.documentSource !== null && { documentSource: input.documentSource }), + ...(input.jobExecutionsRetryConfig !== undefined && + input.jobExecutionsRetryConfig !== null && { + jobExecutionsRetryConfig: serializeAws_restJson1JobExecutionsRetryConfig( + input.jobExecutionsRetryConfig, + context + ), + }), ...(input.jobExecutionsRolloutConfig !== undefined && input.jobExecutionsRolloutConfig !== null && { jobExecutionsRolloutConfig: serializeAws_restJson1JobExecutionsRolloutConfig( @@ -1889,6 +1898,13 @@ export const serializeAws_restJson1CreateJobTemplateCommand = async ( ...(input.documentSource !== undefined && input.documentSource !== null && { documentSource: input.documentSource }), ...(input.jobArn !== undefined && input.jobArn !== null && { jobArn: input.jobArn }), + ...(input.jobExecutionsRetryConfig !== undefined && + input.jobExecutionsRetryConfig !== null && { + jobExecutionsRetryConfig: serializeAws_restJson1JobExecutionsRetryConfig( + input.jobExecutionsRetryConfig, + context + ), + }), ...(input.jobExecutionsRolloutConfig !== undefined && input.jobExecutionsRolloutConfig !== null && { jobExecutionsRolloutConfig: serializeAws_restJson1JobExecutionsRolloutConfig( @@ -5769,6 +5785,7 @@ export const serializeAws_restJson1ListJobExecutionsForThingCommand = async ( ...(input.namespaceId !== undefined && { namespaceId: input.namespaceId }), ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.jobId !== undefined && { jobId: input.jobId }), }; let body: any; return new __HttpRequest({ @@ -8087,6 +8104,13 @@ export const serializeAws_restJson1UpdateJobCommand = async ( ...(input.abortConfig !== undefined && input.abortConfig !== null && { abortConfig: serializeAws_restJson1AbortConfig(input.abortConfig, context) }), ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.jobExecutionsRetryConfig !== undefined && + input.jobExecutionsRetryConfig !== null && { + jobExecutionsRetryConfig: serializeAws_restJson1JobExecutionsRetryConfig( + input.jobExecutionsRetryConfig, + context + ), + }), ...(input.jobExecutionsRolloutConfig !== undefined && input.jobExecutionsRolloutConfig !== null && { jobExecutionsRolloutConfig: serializeAws_restJson1JobExecutionsRolloutConfig( @@ -17505,6 +17529,7 @@ export const deserializeAws_restJson1DescribeJobTemplateCommand = async ( description: undefined, document: undefined, documentSource: undefined, + jobExecutionsRetryConfig: undefined, jobExecutionsRolloutConfig: undefined, jobTemplateArn: undefined, jobTemplateId: undefined, @@ -17527,6 +17552,12 @@ export 
const deserializeAws_restJson1DescribeJobTemplateCommand = async ( if (data.documentSource !== undefined && data.documentSource !== null) { contents.documentSource = __expectString(data.documentSource); } + if (data.jobExecutionsRetryConfig !== undefined && data.jobExecutionsRetryConfig !== null) { + contents.jobExecutionsRetryConfig = deserializeAws_restJson1JobExecutionsRetryConfig( + data.jobExecutionsRetryConfig, + context + ); + } if (data.jobExecutionsRolloutConfig !== undefined && data.jobExecutionsRolloutConfig !== null) { contents.jobExecutionsRolloutConfig = deserializeAws_restJson1JobExecutionsRolloutConfig( data.jobExecutionsRolloutConfig, @@ -31820,6 +31851,18 @@ const serializeAws_restJson1IotSiteWiseAction = (input: IotSiteWiseAction, conte }; }; +const serializeAws_restJson1JobExecutionsRetryConfig = ( + input: JobExecutionsRetryConfig, + context: __SerdeContext +): any => { + return { + ...(input.criteriaList !== undefined && + input.criteriaList !== null && { + criteriaList: serializeAws_restJson1RetryCriteriaList(input.criteriaList, context), + }), + }; +}; + const serializeAws_restJson1JobExecutionsRolloutConfig = ( input: JobExecutionsRolloutConfig, context: __SerdeContext @@ -32263,6 +32306,25 @@ const serializeAws_restJson1Resources = (input: string[], context: __SerdeContex }); }; +const serializeAws_restJson1RetryCriteria = (input: RetryCriteria, context: __SerdeContext): any => { + return { + ...(input.failureType !== undefined && input.failureType !== null && { failureType: input.failureType }), + ...(input.numberOfRetries !== undefined && + input.numberOfRetries !== null && { numberOfRetries: input.numberOfRetries }), + }; +}; + +const serializeAws_restJson1RetryCriteriaList = (input: RetryCriteria[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1RetryCriteria(entry, context); + }); +}; + const serializeAws_restJson1S3Action = (input: S3Action, context: __SerdeContext): any => { return { ...(input.bucketName !== undefined && input.bucketName !== null && { bucketName: input.bucketName }), @@ -34431,6 +34493,10 @@ const deserializeAws_restJson1Job = (output: any, context: __SerdeContext): Job : undefined, forceCanceled: __expectBoolean(output.forceCanceled), jobArn: __expectString(output.jobArn), + jobExecutionsRetryConfig: + output.jobExecutionsRetryConfig !== undefined && output.jobExecutionsRetryConfig !== null + ? deserializeAws_restJson1JobExecutionsRetryConfig(output.jobExecutionsRetryConfig, context) + : undefined, jobExecutionsRolloutConfig: output.jobExecutionsRolloutConfig !== undefined && output.jobExecutionsRolloutConfig !== null ? deserializeAws_restJson1JobExecutionsRolloutConfig(output.jobExecutionsRolloutConfig, context) @@ -34492,6 +34558,18 @@ const deserializeAws_restJson1JobExecution = (output: any, context: __SerdeConte } as any; }; +const deserializeAws_restJson1JobExecutionsRetryConfig = ( + output: any, + context: __SerdeContext +): JobExecutionsRetryConfig => { + return { + criteriaList: + output.criteriaList !== undefined && output.criteriaList !== null + ? 
deserializeAws_restJson1RetryCriteriaList(output.criteriaList, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1JobExecutionsRolloutConfig = ( output: any, context: __SerdeContext @@ -34528,6 +34606,7 @@ const deserializeAws_restJson1JobExecutionSummary = (output: any, context: __Ser output.queuedAt !== undefined && output.queuedAt !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.queuedAt))) : undefined, + retryAttempt: __expectInt32(output.retryAttempt), startedAt: output.startedAt !== undefined && output.startedAt !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.startedAt))) @@ -35486,6 +35565,24 @@ const deserializeAws_restJson1Resources = (output: any, context: __SerdeContext) }); }; +const deserializeAws_restJson1RetryCriteria = (output: any, context: __SerdeContext): RetryCriteria => { + return { + failureType: __expectString(output.failureType), + numberOfRetries: __expectInt32(output.numberOfRetries), + } as any; +}; + +const deserializeAws_restJson1RetryCriteriaList = (output: any, context: __SerdeContext): RetryCriteria[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1RetryCriteria(entry, context); + }); +}; + const deserializeAws_restJson1RoleAliasDescription = (output: any, context: __SerdeContext): RoleAliasDescription => { return { creationDate: diff --git a/clients/client-lakeformation/src/LakeFormation.ts b/clients/client-lakeformation/src/LakeFormation.ts index 3a2280176360..7ed8de69a352 100644 --- a/clients/client-lakeformation/src/LakeFormation.ts +++ b/clients/client-lakeformation/src/LakeFormation.ts @@ -93,6 +93,16 @@ import { GetTableObjectsCommandInput, GetTableObjectsCommandOutput, } from "./commands/GetTableObjectsCommand"; +import { + GetTemporaryGluePartitionCredentialsCommand, + GetTemporaryGluePartitionCredentialsCommandInput, + GetTemporaryGluePartitionCredentialsCommandOutput, +} from "./commands/GetTemporaryGluePartitionCredentialsCommand"; +import { + GetTemporaryGlueTableCredentialsCommand, + GetTemporaryGlueTableCredentialsCommandInput, + GetTemporaryGlueTableCredentialsCommandOutput, +} from "./commands/GetTemporaryGlueTableCredentialsCommand"; import { GetWorkUnitResultsCommand, GetWorkUnitResultsCommandInput, @@ -863,6 +873,70 @@ export class LakeFormation extends LakeFormationClient { } } + /** + *

              This API is identical to GetTemporaryGlueTableCredentials except that it is used when the target Data Catalog resource is of type Partition. Lake Formation restricts the permission of the vended credentials with the same scope-down policy, which restricts access to a single Amazon S3 prefix.

              + */ + public getTemporaryGluePartitionCredentials( + args: GetTemporaryGluePartitionCredentialsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getTemporaryGluePartitionCredentials( + args: GetTemporaryGluePartitionCredentialsCommandInput, + cb: (err: any, data?: GetTemporaryGluePartitionCredentialsCommandOutput) => void + ): void; + public getTemporaryGluePartitionCredentials( + args: GetTemporaryGluePartitionCredentialsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetTemporaryGluePartitionCredentialsCommandOutput) => void + ): void; + public getTemporaryGluePartitionCredentials( + args: GetTemporaryGluePartitionCredentialsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetTemporaryGluePartitionCredentialsCommandOutput) => void), + cb?: (err: any, data?: GetTemporaryGluePartitionCredentialsCommandOutput) => void + ): Promise | void { + const command = new GetTemporaryGluePartitionCredentialsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

              Allows a caller in a secure environment to assume a role with permission to access Amazon S3. To vend such credentials, Lake Formation assumes the role associated with a registered location, for example an Amazon S3 bucket, with a scope-down policy that restricts access to a single prefix.

              + */ + public getTemporaryGlueTableCredentials( + args: GetTemporaryGlueTableCredentialsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getTemporaryGlueTableCredentials( + args: GetTemporaryGlueTableCredentialsCommandInput, + cb: (err: any, data?: GetTemporaryGlueTableCredentialsCommandOutput) => void + ): void; + public getTemporaryGlueTableCredentials( + args: GetTemporaryGlueTableCredentialsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetTemporaryGlueTableCredentialsCommandOutput) => void + ): void; + public getTemporaryGlueTableCredentials( + args: GetTemporaryGlueTableCredentialsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetTemporaryGlueTableCredentialsCommandOutput) => void), + cb?: (err: any, data?: GetTemporaryGlueTableCredentialsCommandOutput) => void + ): Promise | void { + const command = new GetTemporaryGlueTableCredentialsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

              Returns the work units resulting from the query. Work units can be executed in any order and in parallel.

              */ diff --git a/clients/client-lakeformation/src/LakeFormationClient.ts b/clients/client-lakeformation/src/LakeFormationClient.ts index e88729e6004a..652c88c7975f 100644 --- a/clients/client-lakeformation/src/LakeFormationClient.ts +++ b/clients/client-lakeformation/src/LakeFormationClient.ts @@ -97,6 +97,14 @@ import { GetQueryStateCommandInput, GetQueryStateCommandOutput } from "./command import { GetQueryStatisticsCommandInput, GetQueryStatisticsCommandOutput } from "./commands/GetQueryStatisticsCommand"; import { GetResourceLFTagsCommandInput, GetResourceLFTagsCommandOutput } from "./commands/GetResourceLFTagsCommand"; import { GetTableObjectsCommandInput, GetTableObjectsCommandOutput } from "./commands/GetTableObjectsCommand"; +import { + GetTemporaryGluePartitionCredentialsCommandInput, + GetTemporaryGluePartitionCredentialsCommandOutput, +} from "./commands/GetTemporaryGluePartitionCredentialsCommand"; +import { + GetTemporaryGlueTableCredentialsCommandInput, + GetTemporaryGlueTableCredentialsCommandOutput, +} from "./commands/GetTemporaryGlueTableCredentialsCommand"; import { GetWorkUnitResultsCommandInput, GetWorkUnitResultsCommandOutput } from "./commands/GetWorkUnitResultsCommand"; import { GetWorkUnitsCommandInput, GetWorkUnitsCommandOutput } from "./commands/GetWorkUnitsCommand"; import { GrantPermissionsCommandInput, GrantPermissionsCommandOutput } from "./commands/GrantPermissionsCommand"; @@ -163,6 +171,8 @@ export type ServiceInputTypes = | GetQueryStatisticsCommandInput | GetResourceLFTagsCommandInput | GetTableObjectsCommandInput + | GetTemporaryGluePartitionCredentialsCommandInput + | GetTemporaryGlueTableCredentialsCommandInput | GetWorkUnitResultsCommandInput | GetWorkUnitsCommandInput | GrantPermissionsCommandInput @@ -207,6 +217,8 @@ export type ServiceOutputTypes = | GetQueryStatisticsCommandOutput | GetResourceLFTagsCommandOutput | GetTableObjectsCommandOutput + | GetTemporaryGluePartitionCredentialsCommandOutput + | GetTemporaryGlueTableCredentialsCommandOutput | GetWorkUnitResultsCommandOutput | GetWorkUnitsCommandOutput | GrantPermissionsCommandOutput diff --git a/clients/client-lakeformation/src/commands/GetTemporaryGluePartitionCredentialsCommand.ts b/clients/client-lakeformation/src/commands/GetTemporaryGluePartitionCredentialsCommand.ts new file mode 100644 index 000000000000..7a0e25c4d34a --- /dev/null +++ b/clients/client-lakeformation/src/commands/GetTemporaryGluePartitionCredentialsCommand.ts @@ -0,0 +1,106 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { + GetTemporaryGluePartitionCredentialsRequest, + GetTemporaryGluePartitionCredentialsResponse, +} from "../models/models_0"; +import { + deserializeAws_restJson1GetTemporaryGluePartitionCredentialsCommand, + serializeAws_restJson1GetTemporaryGluePartitionCredentialsCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetTemporaryGluePartitionCredentialsCommandInput extends GetTemporaryGluePartitionCredentialsRequest {} +export interface 
GetTemporaryGluePartitionCredentialsCommandOutput + extends GetTemporaryGluePartitionCredentialsResponse, + __MetadataBearer {} + +/** + *

              This API is identical to GetTemporaryGlueTableCredentials except that it is used when the target Data Catalog resource is of type Partition. Lake Formation restricts the permission of the vended credentials with the same scope-down policy, which restricts access to a single Amazon S3 prefix.

              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, GetTemporaryGluePartitionCredentialsCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, GetTemporaryGluePartitionCredentialsCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new GetTemporaryGluePartitionCredentialsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetTemporaryGluePartitionCredentialsCommandInput} for command's `input` shape. + * @see {@link GetTemporaryGluePartitionCredentialsCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class GetTemporaryGluePartitionCredentialsCommand extends $Command< + GetTemporaryGluePartitionCredentialsCommandInput, + GetTemporaryGluePartitionCredentialsCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetTemporaryGluePartitionCredentialsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "GetTemporaryGluePartitionCredentialsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetTemporaryGluePartitionCredentialsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetTemporaryGluePartitionCredentialsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: GetTemporaryGluePartitionCredentialsCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1GetTemporaryGluePartitionCredentialsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1GetTemporaryGluePartitionCredentialsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/GetTemporaryGlueTableCredentialsCommand.ts b/clients/client-lakeformation/src/commands/GetTemporaryGlueTableCredentialsCommand.ts new file mode 100644 index 000000000000..2707506755f0 --- /dev/null +++ b/clients/client-lakeformation/src/commands/GetTemporaryGlueTableCredentialsCommand.ts @@ -0,0 +1,103 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + 
HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { GetTemporaryGlueTableCredentialsRequest, GetTemporaryGlueTableCredentialsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetTemporaryGlueTableCredentialsCommand, + serializeAws_restJson1GetTemporaryGlueTableCredentialsCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetTemporaryGlueTableCredentialsCommandInput extends GetTemporaryGlueTableCredentialsRequest {} +export interface GetTemporaryGlueTableCredentialsCommandOutput + extends GetTemporaryGlueTableCredentialsResponse, + __MetadataBearer {} + +/** + *

              Allows a caller in a secure environment to assume a role with permission to access Amazon S3. To vend such credentials, Lake Formation assumes the role associated with a registered location, for example an Amazon S3 bucket, with a scope-down policy that restricts access to a single prefix.

              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, GetTemporaryGlueTableCredentialsCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, GetTemporaryGlueTableCredentialsCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new GetTemporaryGlueTableCredentialsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetTemporaryGlueTableCredentialsCommandInput} for command's `input` shape. + * @see {@link GetTemporaryGlueTableCredentialsCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class GetTemporaryGlueTableCredentialsCommand extends $Command< + GetTemporaryGlueTableCredentialsCommandInput, + GetTemporaryGlueTableCredentialsCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetTemporaryGlueTableCredentialsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "GetTemporaryGlueTableCredentialsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetTemporaryGlueTableCredentialsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetTemporaryGlueTableCredentialsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: GetTemporaryGlueTableCredentialsCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1GetTemporaryGlueTableCredentialsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1GetTemporaryGlueTableCredentialsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/index.ts b/clients/client-lakeformation/src/commands/index.ts index 1f5d10ed566c..26b8547416ea 100644 --- a/clients/client-lakeformation/src/commands/index.ts +++ b/clients/client-lakeformation/src/commands/index.ts @@ -19,6 +19,8 @@ export * from "./GetQueryStateCommand"; export * from "./GetQueryStatisticsCommand"; export * from "./GetResourceLFTagsCommand"; export * from "./GetTableObjectsCommand"; +export * from "./GetTemporaryGluePartitionCredentialsCommand"; +export * from "./GetTemporaryGlueTableCredentialsCommand"; export * from "./GetWorkUnitResultsCommand"; export * from "./GetWorkUnitsCommand"; export * from "./GrantPermissionsCommand"; diff --git 
a/clients/client-lakeformation/src/models/models_0.ts b/clients/client-lakeformation/src/models/models_0.ts index 6e1ba98ea7ed..77f88b3255e3 100644 --- a/clients/client-lakeformation/src/models/models_0.ts +++ b/clients/client-lakeformation/src/models/models_0.ts @@ -663,6 +663,25 @@ export namespace AlreadyExistsException { }); } +/** + *

              A structure used to include auditing information on the privileged API.

              + */ +export interface AuditContext { + /** + *

              The filter engine can populate the 'AdditionalAuditContext' information with the request ID for you to track. This information will be displayed in the CloudTrail logs in your account.

              + */ + AdditionalAuditContext?: string; +} + +export namespace AuditContext { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AuditContext): any => ({ + ...obj, + }); +} + export enum Permission { ALL = "ALL", ALTER = "ALTER", @@ -682,7 +701,8 @@ export enum Permission { } /** - *

              The AWS Lake Formation principal. Supported principals are IAM users or IAM roles.

              + *

              The Lake Formation principal. Supported principals are IAM users + * or IAM roles.

              */ export interface DataLakePrincipal { /** @@ -1026,6 +1046,9 @@ export interface DataCellsFilter { /** *

              A wildcard with exclusions.

              + * + *

              You must specify either a ColumnNames list or the + * ColumnWildcard.

              */ ColumnWildcard?: ColumnWildcard; } @@ -1549,6 +1572,27 @@ export interface DataLakeSettings { *

              You may want to specify this property when you are in a high-trust boundary, such as the same team or company.

              */ TrustedResourceOwners?: string[]; + + /** + *

              Whether to allow Amazon EMR clusters to access data managed by Lake Formation.

              + * + *

              If true, you allow Amazon EMR clusters to access data in Amazon S3 locations that are registered with Lake Formation.

              + * + *

              If false or null, no Amazon EMR clusters will be able to access data in Amazon S3 locations that are registered with Lake Formation.

              + * + *

              For more information, see (Optional) Allow Data Filtering on Amazon EMR.

              + */ + AllowExternalDataFiltering?: boolean; + + /** + *

              A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering.

              + */ + ExternalDataFilteringAllowList?: DataLakePrincipal[]; + + /** + *

              Lake Formation relies on a privileged process secured by Amazon EMR or the third-party integrator to tag the user's role while assuming it. Lake Formation will publish the acceptable key-value pair, for example key = "LakeFormationTrustedCaller" and value = "TRUE", and the third-party integrator must properly tag the temporary security credentials that will be used to call Lake Formation's administrative APIs.

              + */ + AuthorizedSessionTagValueList?: string[]; } export namespace DataLakeSettings { @@ -2202,6 +2246,190 @@ export namespace GetTableObjectsResponse { }); } +/** + *

              Contains a list of values defining partitions.

              + */ +export interface PartitionValueList { + /** + *

              The list of partition values.

              + */ + Values: string[] | undefined; +} + +export namespace PartitionValueList { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PartitionValueList): any => ({ + ...obj, + }); +} + +export enum PermissionType { + CELL_FILTER_PERMISSION = "CELL_FILTER_PERMISSION", + COLUMN_PERMISSION = "COLUMN_PERMISSION", +} + +export interface GetTemporaryGluePartitionCredentialsRequest { + /** + *

              The ARN of the partitions' table.

              + */ + TableArn: string | undefined; + + /** + *

              A list of partition values identifying a single partition.

              + */ + Partition: PartitionValueList | undefined; + + /** + *

              Filters the request based on the user having been granted a list of specified permissions on the requested resource(s).

              + */ + Permissions?: (Permission | string)[]; + + /** + *

              The time period, between 900 and 21,600 seconds, for the timeout of the temporary credentials.

              + */ + DurationSeconds?: number; + + /** + *

              A structure representing context to access a resource (column names, query ID, etc.).

              + */ + AuditContext?: AuditContext; + + /** + *

              A list of supported permission types for the partition. Valid values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION.

              + */ + SupportedPermissionTypes: (PermissionType | string)[] | undefined; +} + +export namespace GetTemporaryGluePartitionCredentialsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTemporaryGluePartitionCredentialsRequest): any => ({ + ...obj, + }); +} + +export interface GetTemporaryGluePartitionCredentialsResponse { + /** + *

              The access key ID for the temporary credentials.

              + */ + AccessKeyId?: string; + + /** + *

              The secret key for the temporary credentials.

              + */ + SecretAccessKey?: string; + + /** + *

              The session token for the temporary credentials.

              + */ + SessionToken?: string; + + /** + *

              The date and time when the temporary credentials expire.

              + */ + Expiration?: Date; +} + +export namespace GetTemporaryGluePartitionCredentialsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTemporaryGluePartitionCredentialsResponse): any => ({ + ...obj, + }); +} + +/** + *

              The engine does not support filtering data based on the enforced permissions. For example, if you call the GetTemporaryGlueTableCredentials operation with SupportedPermissionTypes set to COLUMN_PERMISSION only, but cell-level permissions exist on the table, this exception is thrown.

              + */ +export interface PermissionTypeMismatchException extends __SmithyException, $MetadataBearer { + name: "PermissionTypeMismatchException"; + $fault: "client"; + /** + *

              A message describing the problem.

              + */ + Message?: string; +} + +export namespace PermissionTypeMismatchException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PermissionTypeMismatchException): any => ({ + ...obj, + }); +} + +export interface GetTemporaryGlueTableCredentialsRequest { + /** + *

              The ARN identifying a table in the Data Catalog for the temporary credentials request.

              + */ + TableArn: string | undefined; + + /** + *

              Filters the request based on whether the user has been granted the specified permissions on the requested resource(s).

              + */ + Permissions?: (Permission | string)[]; + + /** + *

              The time period, between 900 and 21,600 seconds, for the timeout of the temporary credentials.

              + */ + DurationSeconds?: number; + + /** + *

              A structure representing context to access a resource (column names, query ID, etc).

              + */ + AuditContext?: AuditContext; + + /** + *

              A list of supported permission types for the table. Valid values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION.

              + */ + SupportedPermissionTypes: (PermissionType | string)[] | undefined; +} + +export namespace GetTemporaryGlueTableCredentialsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTemporaryGlueTableCredentialsRequest): any => ({ + ...obj, + }); +} + +export interface GetTemporaryGlueTableCredentialsResponse { + /** + *

              The access key ID for the temporary credentials.

              + */ + AccessKeyId?: string; + + /** + *

              The secret key for the temporary credentials.

              + */ + SecretAccessKey?: string; + + /** + *

              The session token for the temporary credentials.

              + */ + SessionToken?: string; + + /** + *

              The date and time when the temporary credentials expire.
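The four response fields form an ordinary set of temporary credentials, so one way to consume them is to hand them to another client until Expiration. A sketch, where the S3 bucket, key, and region are assumptions for illustration:

```ts
import {
  LakeFormationClient,
  GetTemporaryGlueTableCredentialsCommand,
} from "@aws-sdk/client-lakeformation";
import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const lakeFormation = new LakeFormationClient({ region: "us-east-1" });

const creds = await lakeFormation.send(
  new GetTemporaryGlueTableCredentialsCommand({
    TableArn: "arn:aws:glue:us-east-1:111122223333:table/sales_db/orders", // placeholder
    SupportedPermissionTypes: ["COLUMN_PERMISSION"],
  })
);

// Use the vended keys as regular credentials for data access until they expire.
const s3 = new S3Client({
  region: "us-east-1",
  credentials: {
    accessKeyId: creds.AccessKeyId!,
    secretAccessKey: creds.SecretAccessKey!,
    sessionToken: creds.SessionToken,
  },
});

await s3.send(
  new GetObjectCommand({
    Bucket: "my-data-lake-bucket", // placeholder
    Key: "sales_db/orders/part-00000.parquet", // placeholder
  })
);
```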

              + */ + Expiration?: Date; +} + +export namespace GetTemporaryGlueTableCredentialsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTemporaryGlueTableCredentialsResponse): any => ({ + ...obj, + }); +} + export interface GetWorkUnitResultsRequest { /** *

              The ID of the plan query operation for which to get results.

              diff --git a/clients/client-lakeformation/src/protocols/Aws_restJson1.ts b/clients/client-lakeformation/src/protocols/Aws_restJson1.ts index 37ae98700552..e37e7f6d3913 100644 --- a/clients/client-lakeformation/src/protocols/Aws_restJson1.ts +++ b/clients/client-lakeformation/src/protocols/Aws_restJson1.ts @@ -4,6 +4,7 @@ import { isValidHostname as __isValidHostname, } from "@aws-sdk/protocol-http"; import { + expectBoolean as __expectBoolean, expectLong as __expectLong, expectNonNull as __expectNonNull, expectNumber as __expectNumber, @@ -68,6 +69,14 @@ import { GetQueryStateCommandInput, GetQueryStateCommandOutput } from "../comman import { GetQueryStatisticsCommandInput, GetQueryStatisticsCommandOutput } from "../commands/GetQueryStatisticsCommand"; import { GetResourceLFTagsCommandInput, GetResourceLFTagsCommandOutput } from "../commands/GetResourceLFTagsCommand"; import { GetTableObjectsCommandInput, GetTableObjectsCommandOutput } from "../commands/GetTableObjectsCommand"; +import { + GetTemporaryGluePartitionCredentialsCommandInput, + GetTemporaryGluePartitionCredentialsCommandOutput, +} from "../commands/GetTemporaryGluePartitionCredentialsCommand"; +import { + GetTemporaryGlueTableCredentialsCommandInput, + GetTemporaryGlueTableCredentialsCommandOutput, +} from "../commands/GetTemporaryGlueTableCredentialsCommand"; import { GetWorkUnitResultsCommandInput, GetWorkUnitResultsCommandOutput } from "../commands/GetWorkUnitResultsCommand"; import { GetWorkUnitsCommandInput, GetWorkUnitsCommandOutput } from "../commands/GetWorkUnitsCommand"; import { GrantPermissionsCommandInput, GrantPermissionsCommandOutput } from "../commands/GrantPermissionsCommand"; @@ -115,6 +124,7 @@ import { AddObjectInput, AllRowsWildcard, AlreadyExistsException, + AuditContext, BatchPermissionsFailureEntry, BatchPermissionsRequestEntry, CatalogResource, @@ -145,7 +155,10 @@ import { OperationTimeoutException, OptimizerType, PartitionObjects, + PartitionValueList, Permission, + PermissionType, + PermissionTypeMismatchException, PlanningStatistics, PrincipalPermissions, PrincipalResourcePermissions, @@ -737,6 +750,78 @@ export const serializeAws_restJson1GetTableObjectsCommand = async ( }); }; +export const serializeAws_restJson1GetTemporaryGluePartitionCredentialsCommand = async ( + input: GetTemporaryGluePartitionCredentialsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/GetTemporaryGluePartitionCredentials"; + let body: any; + body = JSON.stringify({ + ...(input.AuditContext !== undefined && + input.AuditContext !== null && { AuditContext: serializeAws_restJson1AuditContext(input.AuditContext, context) }), + ...(input.DurationSeconds !== undefined && + input.DurationSeconds !== null && { DurationSeconds: input.DurationSeconds }), + ...(input.Partition !== undefined && + input.Partition !== null && { Partition: serializeAws_restJson1PartitionValueList(input.Partition, context) }), + ...(input.Permissions !== undefined && + input.Permissions !== null && { Permissions: serializeAws_restJson1PermissionList(input.Permissions, context) }), + ...(input.SupportedPermissionTypes !== undefined && + input.SupportedPermissionTypes !== null && { + SupportedPermissionTypes: serializeAws_restJson1PermissionTypeList(input.SupportedPermissionTypes, context), + }), + ...(input.TableArn !== undefined && input.TableArn !== null && { TableArn: input.TableArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetTemporaryGlueTableCredentialsCommand = async ( + input: GetTemporaryGlueTableCredentialsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/GetTemporaryGlueTableCredentials"; + let body: any; + body = JSON.stringify({ + ...(input.AuditContext !== undefined && + input.AuditContext !== null && { AuditContext: serializeAws_restJson1AuditContext(input.AuditContext, context) }), + ...(input.DurationSeconds !== undefined && + input.DurationSeconds !== null && { DurationSeconds: input.DurationSeconds }), + ...(input.Permissions !== undefined && + input.Permissions !== null && { Permissions: serializeAws_restJson1PermissionList(input.Permissions, context) }), + ...(input.SupportedPermissionTypes !== undefined && + input.SupportedPermissionTypes !== null && { + SupportedPermissionTypes: serializeAws_restJson1PermissionTypeList(input.SupportedPermissionTypes, context), + }), + ...(input.TableArn !== undefined && input.TableArn !== null && { TableArn: input.TableArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1GetWorkUnitResultsCommand = async ( input: GetWorkUnitResultsCommandInput, context: __SerdeContext @@ -3217,6 +3302,220 @@ const deserializeAws_restJson1GetTableObjectsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1GetTemporaryGluePartitionCredentialsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetTemporaryGluePartitionCredentialsCommandError(output, context); + } + const contents: GetTemporaryGluePartitionCredentialsCommandOutput = { + $metadata: deserializeMetadata(output), + AccessKeyId: undefined, + Expiration: undefined, + SecretAccessKey: undefined, + SessionToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, 
context)), "body"); + if (data.AccessKeyId !== undefined && data.AccessKeyId !== null) { + contents.AccessKeyId = __expectString(data.AccessKeyId); + } + if (data.Expiration !== undefined && data.Expiration !== null) { + contents.Expiration = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.Expiration))); + } + if (data.SecretAccessKey !== undefined && data.SecretAccessKey !== null) { + contents.SecretAccessKey = __expectString(data.SecretAccessKey); + } + if (data.SessionToken !== undefined && data.SessionToken !== null) { + contents.SessionToken = __expectString(data.SessionToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetTemporaryGluePartitionCredentialsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "PermissionTypeMismatchException": + case "com.amazonaws.lakeformation#PermissionTypeMismatchException": + response = { + ...(await deserializeAws_restJson1PermissionTypeMismatchExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetTemporaryGlueTableCredentialsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return 
deserializeAws_restJson1GetTemporaryGlueTableCredentialsCommandError(output, context); + } + const contents: GetTemporaryGlueTableCredentialsCommandOutput = { + $metadata: deserializeMetadata(output), + AccessKeyId: undefined, + Expiration: undefined, + SecretAccessKey: undefined, + SessionToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.AccessKeyId !== undefined && data.AccessKeyId !== null) { + contents.AccessKeyId = __expectString(data.AccessKeyId); + } + if (data.Expiration !== undefined && data.Expiration !== null) { + contents.Expiration = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.Expiration))); + } + if (data.SecretAccessKey !== undefined && data.SecretAccessKey !== null) { + contents.SecretAccessKey = __expectString(data.SecretAccessKey); + } + if (data.SessionToken !== undefined && data.SessionToken !== null) { + contents.SessionToken = __expectString(data.SessionToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetTemporaryGlueTableCredentialsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "PermissionTypeMismatchException": + case "com.amazonaws.lakeformation#PermissionTypeMismatchException": + response = { + ...(await deserializeAws_restJson1PermissionTypeMismatchExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message 
|| response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1GetWorkUnitResultsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -5130,6 +5429,23 @@ const deserializeAws_restJson1OperationTimeoutExceptionResponse = async ( return contents; }; +const deserializeAws_restJson1PermissionTypeMismatchExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: PermissionTypeMismatchException = { + name: "PermissionTypeMismatchException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + const deserializeAws_restJson1ResourceNotReadyExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -5285,6 +5601,24 @@ const serializeAws_restJson1AllRowsWildcard = (input: AllRowsWildcard, context: return {}; }; +const serializeAws_restJson1AuditContext = (input: AuditContext, context: __SerdeContext): any => { + return { + ...(input.AdditionalAuditContext !== undefined && + input.AdditionalAuditContext !== null && { AdditionalAuditContext: input.AdditionalAuditContext }), + }; +}; + +const serializeAws_restJson1AuthorizedSessionTagValueList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1BatchPermissionsRequestEntry = ( input: BatchPermissionsRequestEntry, context: __SerdeContext @@ -5400,6 +5734,15 @@ const serializeAws_restJson1DataLakePrincipalList = (input: DataLakePrincipal[], const serializeAws_restJson1DataLakeSettings = (input: DataLakeSettings, context: __SerdeContext): any => { return { + ...(input.AllowExternalDataFiltering !== undefined && + input.AllowExternalDataFiltering !== null && { AllowExternalDataFiltering: input.AllowExternalDataFiltering }), + ...(input.AuthorizedSessionTagValueList !== undefined && + input.AuthorizedSessionTagValueList !== null && { + AuthorizedSessionTagValueList: serializeAws_restJson1AuthorizedSessionTagValueList( + input.AuthorizedSessionTagValueList, + context + ), + }), ...(input.CreateDatabaseDefaultPermissions !== undefined && input.CreateDatabaseDefaultPermissions !== null && { CreateDatabaseDefaultPermissions: serializeAws_restJson1PrincipalPermissionsList( @@ -5418,6 +5761,13 @@ const serializeAws_restJson1DataLakeSettings = (input: DataLakeSettings, context input.DataLakeAdmins !== null && { DataLakeAdmins: serializeAws_restJson1DataLakePrincipalList(input.DataLakeAdmins, context), }), + ...(input.ExternalDataFilteringAllowList !== undefined && + input.ExternalDataFilteringAllowList !== null && { + ExternalDataFilteringAllowList: serializeAws_restJson1DataLakePrincipalList( + input.ExternalDataFilteringAllowList, + context + ), + }), ...(input.TrustedResourceOwners !== undefined && input.TrustedResourceOwners !== null && { TrustedResourceOwners: serializeAws_restJson1TrustedResourceOwners(input.TrustedResourceOwners, context), @@ -5523,6 +5873,13 @@ const serializeAws_restJson1LFTagsList = (input: LFTagPair[], context: __SerdeCo }); }; +const serializeAws_restJson1PartitionValueList = (input: PartitionValueList, context: 
__SerdeContext): any => { + return { + ...(input.Values !== undefined && + input.Values !== null && { Values: serializeAws_restJson1ValueStringList(input.Values, context) }), + }; +}; + const serializeAws_restJson1PartitionValuesList = (input: string[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -5545,6 +5902,17 @@ const serializeAws_restJson1PermissionList = (input: (Permission | string)[], co }); }; +const serializeAws_restJson1PermissionTypeList = (input: (PermissionType | string)[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1PrincipalPermissions = (input: PrincipalPermissions, context: __SerdeContext): any => { return { ...(input.Permissions !== undefined && @@ -5730,6 +6098,17 @@ const serializeAws_restJson1TrustedResourceOwners = (input: string[], context: _ }); }; +const serializeAws_restJson1ValueStringList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1VirtualObject = (input: VirtualObject, context: __SerdeContext): any => { return { ...(input.ETag !== undefined && input.ETag !== null && { ETag: input.ETag }), @@ -5774,6 +6153,17 @@ const deserializeAws_restJson1AllRowsWildcard = (output: any, context: __SerdeCo return {} as any; }; +const deserializeAws_restJson1AuthorizedSessionTagValueList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + const deserializeAws_restJson1BatchPermissionsFailureEntry = ( output: any, context: __SerdeContext @@ -5955,6 +6345,11 @@ const deserializeAws_restJson1DataLakePrincipalList = (output: any, context: __S const deserializeAws_restJson1DataLakeSettings = (output: any, context: __SerdeContext): DataLakeSettings => { return { + AllowExternalDataFiltering: __expectBoolean(output.AllowExternalDataFiltering), + AuthorizedSessionTagValueList: + output.AuthorizedSessionTagValueList !== undefined && output.AuthorizedSessionTagValueList !== null + ? deserializeAws_restJson1AuthorizedSessionTagValueList(output.AuthorizedSessionTagValueList, context) + : undefined, CreateDatabaseDefaultPermissions: output.CreateDatabaseDefaultPermissions !== undefined && output.CreateDatabaseDefaultPermissions !== null ? deserializeAws_restJson1PrincipalPermissionsList(output.CreateDatabaseDefaultPermissions, context) @@ -5967,6 +6362,10 @@ const deserializeAws_restJson1DataLakeSettings = (output: any, context: __SerdeC output.DataLakeAdmins !== undefined && output.DataLakeAdmins !== null ? deserializeAws_restJson1DataLakePrincipalList(output.DataLakeAdmins, context) : undefined, + ExternalDataFilteringAllowList: + output.ExternalDataFilteringAllowList !== undefined && output.ExternalDataFilteringAllowList !== null + ? deserializeAws_restJson1DataLakePrincipalList(output.ExternalDataFilteringAllowList, context) + : undefined, TrustedResourceOwners: output.TrustedResourceOwners !== undefined && output.TrustedResourceOwners !== null ? 
deserializeAws_restJson1TrustedResourceOwners(output.TrustedResourceOwners, context) diff --git a/clients/client-lambda/src/endpoints.ts b/clients/client-lambda/src/endpoints.ts index dc854900420a..1d31587c1e04 100644 --- a/clients/client-lambda/src/endpoints.ts +++ b/clients/client-lambda/src/endpoints.ts @@ -2,6 +2,246 @@ import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolv import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; const regionHash: RegionHash = { + "af-south-1": { + variants: [ + { + hostname: "lambda.af-south-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.af-south-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ap-east-1": { + variants: [ + { + hostname: "lambda.ap-east-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ap-east-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ap-northeast-1": { + variants: [ + { + hostname: "lambda.ap-northeast-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ap-northeast-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ap-northeast-2": { + variants: [ + { + hostname: "lambda.ap-northeast-2.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ap-northeast-2.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ap-northeast-3": { + variants: [ + { + hostname: "lambda.ap-northeast-3.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ap-northeast-3.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ap-south-1": { + variants: [ + { + hostname: "lambda.ap-south-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ap-south-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ap-southeast-1": { + variants: [ + { + hostname: "lambda.ap-southeast-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ap-southeast-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ap-southeast-2": { + variants: [ + { + hostname: "lambda.ap-southeast-2.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ap-southeast-2.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ap-southeast-3": { + variants: [ + { + hostname: "lambda.ap-southeast-3.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ap-southeast-3.api.aws", + tags: ["dualstack"], + }, + ], + }, + "ca-central-1": { + variants: [ + { + hostname: "lambda.ca-central-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.ca-central-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "cn-north-1": { + variants: [ + { + hostname: "lambda.cn-north-1.amazonaws.com.cn", + tags: [], + }, + { + hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "cn-northwest-1": { + variants: [ + { + hostname: "lambda.cn-northwest-1.amazonaws.com.cn", + tags: [], + }, + { + hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "eu-central-1": { + variants: [ + { + hostname: "lambda.eu-central-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.eu-central-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "eu-north-1": { + variants: [ + { + hostname: "lambda.eu-north-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.eu-north-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "eu-south-1": { + variants: [ + { + hostname: "lambda.eu-south-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.eu-south-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "eu-west-1": { + variants: [ + { + hostname: "lambda.eu-west-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.eu-west-1.api.aws", + 
tags: ["dualstack"], + }, + ], + }, + "eu-west-2": { + variants: [ + { + hostname: "lambda.eu-west-2.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.eu-west-2.api.aws", + tags: ["dualstack"], + }, + ], + }, + "eu-west-3": { + variants: [ + { + hostname: "lambda.eu-west-3.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.eu-west-3.api.aws", + tags: ["dualstack"], + }, + ], + }, + "me-south-1": { + variants: [ + { + hostname: "lambda.me-south-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.me-south-1.api.aws", + tags: ["dualstack"], + }, + ], + }, + "sa-east-1": { + variants: [ + { + hostname: "lambda.sa-east-1.amazonaws.com", + tags: [], + }, + { + hostname: "lambda.sa-east-1.api.aws", + tags: ["dualstack"], + }, + ], + }, "us-east-1": { variants: [ { @@ -12,6 +252,10 @@ const regionHash: RegionHash = { hostname: "lambda-fips.us-east-1.amazonaws.com", tags: ["fips"], }, + { + hostname: "lambda.us-east-1.api.aws", + tags: ["dualstack"], + }, ], }, "us-east-2": { @@ -24,6 +268,10 @@ const regionHash: RegionHash = { hostname: "lambda-fips.us-east-2.amazonaws.com", tags: ["fips"], }, + { + hostname: "lambda.us-east-2.api.aws", + tags: ["dualstack"], + }, ], }, "us-gov-east-1": { @@ -60,6 +308,10 @@ const regionHash: RegionHash = { hostname: "lambda-fips.us-west-1.amazonaws.com", tags: ["fips"], }, + { + hostname: "lambda.us-west-1.api.aws", + tags: ["dualstack"], + }, ], }, "us-west-2": { @@ -72,6 +324,10 @@ const regionHash: RegionHash = { hostname: "lambda-fips.us-west-2.amazonaws.com", tags: ["fips"], }, + { + hostname: "lambda.us-west-2.api.aws", + tags: ["dualstack"], + }, ], }, }; diff --git a/clients/client-mediaconvert/src/commands/UpdatePresetCommand.ts b/clients/client-mediaconvert/src/commands/UpdatePresetCommand.ts index b46a19829fe5..deddf6446420 100644 --- a/clients/client-mediaconvert/src/commands/UpdatePresetCommand.ts +++ b/clients/client-mediaconvert/src/commands/UpdatePresetCommand.ts @@ -12,7 +12,8 @@ import { } from "@aws-sdk/types"; import { MediaConvertClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../MediaConvertClient"; -import { UpdatePresetRequest, UpdatePresetResponse } from "../models/models_1"; +import { UpdatePresetRequest } from "../models/models_1"; +import { UpdatePresetResponse } from "../models/models_2"; import { deserializeAws_restJson1UpdatePresetCommand, serializeAws_restJson1UpdatePresetCommand, diff --git a/clients/client-mediaconvert/src/models/models_1.ts b/clients/client-mediaconvert/src/models/models_1.ts index 4566aeb4bd1d..994f8b6e9584 100644 --- a/clients/client-mediaconvert/src/models/models_1.ts +++ b/clients/client-mediaconvert/src/models/models_1.ts @@ -3013,6 +3013,12 @@ export enum NoiseFilterPostTemporalSharpening { ENABLED = "ENABLED", } +export enum NoiseFilterPostTemporalSharpeningStrength { + HIGH = "HIGH", + LOW = "LOW", + MEDIUM = "MEDIUM", +} + /** * Noise reducer filter settings for temporal filter. */ @@ -3023,10 +3029,15 @@ export interface NoiseReducerTemporalFilterSettings { AggressiveMode?: number; /** - * Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), you can use this setting to apply sharpening. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply filtering, depending on input type and quality. When you set Noise reducer to Temporal, your output bandwidth is reduced. When Post temporal sharpening is also enabled, that bandwidth reduction is smaller. 
+ * When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the sharpness of your output is reduced. You can optionally use Post temporal sharpening (PostTemporalSharpening) to apply sharpening to the edges of your output. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (PostTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening. */ PostTemporalSharpening?: NoiseFilterPostTemporalSharpening | string; + /** + * Use Post temporal sharpening strength (PostTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), or leave blank, to apply a low amount of sharpening. Set Post temporal sharpening strength to Medium (MEDIUM) to apply medium amount of sharpening. Set Post temporal sharpening strength to High (HIGH) to apply a high amount of sharpening. + */ + PostTemporalSharpeningStrength?: NoiseFilterPostTemporalSharpeningStrength | string; + /** * The speed of the filter (higher number is faster). Low setting reduces bit rate at the cost of transcode time, high setting improves transcode time at the cost of bit rate. */ @@ -5476,19 +5487,3 @@ export namespace UpdatePresetRequest { ...obj, }); } - -export interface UpdatePresetResponse { - /** - * A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process. - */ - Preset?: Preset; -} - -export namespace UpdatePresetResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: UpdatePresetResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-mediaconvert/src/models/models_2.ts b/clients/client-mediaconvert/src/models/models_2.ts index bf1b13a27be8..3ba77801188f 100644 --- a/clients/client-mediaconvert/src/models/models_2.ts +++ b/clients/client-mediaconvert/src/models/models_2.ts @@ -1,4 +1,20 @@ -import { Queue, QueueStatus, ReservationPlanSettings } from "./models_1"; +import { Preset, Queue, QueueStatus, ReservationPlanSettings } from "./models_1"; + +export interface UpdatePresetResponse { + /** + * A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process. 
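To make the new PostTemporalSharpeningStrength field from the models_1.ts hunk above concrete, here is a minimal sketch of a NoiseReducer settings fragment that exercises it; the numeric values are illustrative only, and where this fragment nests inside a full preset or job is outside this change:

```ts
// Sketch: temporal noise reduction with an explicit post-sharpening strength.
const noiseReducer = {
  Filter: "TEMPORAL",
  TemporalFilterSettings: {
    AggressiveMode: 0, // illustrative
    Speed: 1, // illustrative
    Strength: 8, // illustrative
    PostTemporalSharpening: "ENABLED",
    PostTemporalSharpeningStrength: "MEDIUM", // new field: LOW | MEDIUM | HIGH
  },
};
```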
+ */ + Preset?: Preset; +} + +export namespace UpdatePresetResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdatePresetResponse): any => ({ + ...obj, + }); +} export interface UpdateQueueRequest { /** diff --git a/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts b/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts index 45ec1a2f2da7..34eb62ad7520 100644 --- a/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts +++ b/clients/client-mediaconvert/src/protocols/Aws_restJson1.ts @@ -6417,6 +6417,10 @@ const serializeAws_restJson1NoiseReducerTemporalFilterSettings = ( input.AggressiveMode !== null && { aggressiveMode: input.AggressiveMode }), ...(input.PostTemporalSharpening !== undefined && input.PostTemporalSharpening !== null && { postTemporalSharpening: input.PostTemporalSharpening }), + ...(input.PostTemporalSharpeningStrength !== undefined && + input.PostTemporalSharpeningStrength !== null && { + postTemporalSharpeningStrength: input.PostTemporalSharpeningStrength, + }), ...(input.Speed !== undefined && input.Speed !== null && { speed: input.Speed }), ...(input.Strength !== undefined && input.Strength !== null && { strength: input.Strength }), }; @@ -9570,6 +9574,7 @@ const deserializeAws_restJson1NoiseReducerTemporalFilterSettings = ( return { AggressiveMode: __expectInt32(output.aggressiveMode), PostTemporalSharpening: __expectString(output.postTemporalSharpening), + PostTemporalSharpeningStrength: __expectString(output.postTemporalSharpeningStrength), Speed: __expectInt32(output.speed), Strength: __expectInt32(output.strength), } as any; diff --git a/clients/client-medialive/src/models/models_0.ts b/clients/client-medialive/src/models/models_0.ts index b0674082b358..cba710c8ca51 100644 --- a/clients/client-medialive/src/models/models_0.ts +++ b/clients/client-medialive/src/models/models_0.ts @@ -5643,6 +5643,11 @@ export enum HlsProgramDateTime { INCLUDE = "INCLUDE", } +export enum HlsProgramDateTimeClock { + INITIALIZE_FROM_OUTPUT_TIMECODE = "INITIALIZE_FROM_OUTPUT_TIMECODE", + SYSTEM_CLOCK = "SYSTEM_CLOCK", +} + export enum HlsRedundantManifest { DISABLED = "DISABLED", ENABLED = "ENABLED", @@ -5854,10 +5859,19 @@ export interface HlsGroupSettings { OutputSelection?: HlsOutputSelection | string; /** - * Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. The value is calculated as follows: either the program date and time are initialized using the input timecode source, or the time is initialized using the input timecode source and the date is initialized using the timestampOffset. + * Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. The value is calculated using the program date time clock. */ ProgramDateTime?: HlsProgramDateTime | string; + /** + * Specifies the algorithm used to drive the HLS EXT-X-PROGRAM-DATE-TIME clock. Options include: + * + * INITIALIZE_FROM_OUTPUT_TIMECODE: The PDT clock is initialized as a function of the first output timecode, then incremented by the EXTINF duration of each encoded segment. + * + * SYSTEM_CLOCK: The PDT clock is initialized as a function of the UTC wall clock, then incremented by the EXTINF duration of each encoded segment. If the PDT clock diverges from the wall clock by more than 500ms, it is resynchronized to the wall clock. + */ + ProgramDateTimeClock?: HlsProgramDateTimeClock | string; + /** * Period of insertion of EXT-X-PROGRAM-DATE-TIME entry, in seconds. 
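A small sketch of the program-date-time portion of an HLS group that uses the new ProgramDateTimeClock option described just above; only these three fields are shown, and required HLS group settings such as Destination are omitted:

```ts
// Sketch: emit EXT-X-PROGRAM-DATE-TIME driven by the system clock.
const hlsProgramDateTimeSettings = {
  ProgramDateTime: "INCLUDE",
  ProgramDateTimeClock: "SYSTEM_CLOCK", // or "INITIALIZE_FROM_OUTPUT_TIMECODE"
  ProgramDateTimePeriod: 60, // seconds between EXT-X-PROGRAM-DATE-TIME entries (illustrative)
};
```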
*/ @@ -5923,22 +5937,3 @@ export namespace HlsGroupSettings { ...obj, }); } - -/** - * Media Package Group Settings - */ -export interface MediaPackageGroupSettings { - /** - * MediaPackage channel destination. - */ - Destination: OutputLocationRef | undefined; -} - -export namespace MediaPackageGroupSettings { - /** - * @internal - */ - export const filterSensitiveLog = (obj: MediaPackageGroupSettings): any => ({ - ...obj, - }); -} diff --git a/clients/client-medialive/src/models/models_1.ts b/clients/client-medialive/src/models/models_1.ts index cedfe2af4613..fdfc52060021 100644 --- a/clients/client-medialive/src/models/models_1.ts +++ b/clients/client-medialive/src/models/models_1.ts @@ -45,7 +45,6 @@ import { LogLevel, MediaConnectFlow, MediaConnectFlowRequest, - MediaPackageGroupSettings, MultiplexOutputDestination, MultiplexProgramPipelineDetail, MultiplexProgramSummary, @@ -61,6 +60,25 @@ import { VpcOutputSettingsDescription, } from "./models_0"; +/** + * Media Package Group Settings + */ +export interface MediaPackageGroupSettings { + /** + * MediaPackage channel destination. + */ + Destination: OutputLocationRef | undefined; +} + +export namespace MediaPackageGroupSettings { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MediaPackageGroupSettings): any => ({ + ...obj, + }); +} + export enum SmoothGroupAudioOnlyTimecodeControl { PASSTHROUGH = "PASSTHROUGH", USE_CONFIGURED_CLOCK = "USE_CONFIGURED_CLOCK", diff --git a/clients/client-medialive/src/protocols/Aws_restJson1.ts b/clients/client-medialive/src/protocols/Aws_restJson1.ts index 2034e664ddd6..25c65425dc56 100644 --- a/clients/client-medialive/src/protocols/Aws_restJson1.ts +++ b/clients/client-medialive/src/protocols/Aws_restJson1.ts @@ -232,7 +232,6 @@ import { M3u8Settings, MediaConnectFlow, MediaConnectFlowRequest, - MediaPackageGroupSettings, MediaPackageOutputDestinationSettings, MediaPackageOutputSettings, Mp2Settings, @@ -322,6 +321,7 @@ import { InputSwitchScheduleActionSettings, InputVpcRequest, InternalServerErrorException, + MediaPackageGroupSettings, MotionGraphicsActivateScheduleActionSettings, MotionGraphicsConfiguration, MotionGraphicsDeactivateScheduleActionSettings, @@ -10840,6 +10840,8 @@ const serializeAws_restJson1HlsGroupSettings = (input: HlsGroupSettings, context input.OutputSelection !== null && { outputSelection: input.OutputSelection }), ...(input.ProgramDateTime !== undefined && input.ProgramDateTime !== null && { programDateTime: input.ProgramDateTime }), + ...(input.ProgramDateTimeClock !== undefined && + input.ProgramDateTimeClock !== null && { programDateTimeClock: input.ProgramDateTimeClock }), ...(input.ProgramDateTimePeriod !== undefined && input.ProgramDateTimePeriod !== null && { programDateTimePeriod: input.ProgramDateTimePeriod }), ...(input.RedundantManifest !== undefined && @@ -14248,6 +14250,7 @@ const deserializeAws_restJson1HlsGroupSettings = (output: any, context: __SerdeC Mode: __expectString(output.mode), OutputSelection: __expectString(output.outputSelection), ProgramDateTime: __expectString(output.programDateTime), + ProgramDateTimeClock: __expectString(output.programDateTimeClock), ProgramDateTimePeriod: __expectInt32(output.programDateTimePeriod), RedundantManifest: __expectString(output.redundantManifest), SegmentLength: __expectInt32(output.segmentLength), diff --git a/clients/client-mediatailor/src/models/models_0.ts b/clients/client-mediatailor/src/models/models_0.ts index ceaa0339653a..3f2513d306d4 100644 --- 
a/clients/client-mediatailor/src/models/models_0.ts +++ b/clients/client-mediatailor/src/models/models_0.ts @@ -280,7 +280,7 @@ export interface Channel { CreationTime?: Date; /** - *

              Contains information about the slate used to fill gaps between programs in the schedule. You must configure FillerSlate if your channel uses a LINEAR PlaybackMode.

              + *

              The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the LINEAR PlaybackMode. MediaTailor doesn't support filler slate for channels using the LOOP PlaybackMode.

              */ FillerSlate?: SlateSource; @@ -1181,7 +1181,7 @@ export interface CreateChannelRequest { ChannelName: string | undefined; /** - *

              The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses a LINEAR PlaybackMode.

              + *

              The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the LINEAR PlaybackMode. MediaTailor doesn't support filler slate for channels using the LOOP PlaybackMode.

              */ FillerSlate?: SlateSource; @@ -3056,6 +3056,11 @@ export interface UpdateChannelRequest { */ ChannelName: string | undefined; + /** + *

              The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the LINEAR PlaybackMode. MediaTailor doesn't support filler slate for channels using the LOOP PlaybackMode.
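A hedged usage sketch of the field being added to UpdateChannelRequest; the channel, source location, VOD source, and region names are placeholders, and a real call would also carry the channel's existing Outputs, which are elided here:

```ts
import { MediaTailorClient, UpdateChannelCommand } from "@aws-sdk/client-mediatailor";

const client = new MediaTailorClient({ region: "us-west-2" });

await client.send(
  new UpdateChannelCommand({
    ChannelName: "my-linear-channel", // placeholder
    Outputs: [
      /* the channel's existing output configuration, unchanged (elided) */
    ],
    FillerSlate: {
      SourceLocationName: "my-source-location", // placeholder
      VodSourceName: "my-filler-slate", // placeholder
    },
  })
);
```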

              + */ + FillerSlate?: SlateSource; + /** *

              The channel's output properties.

              */ diff --git a/clients/client-mediatailor/src/protocols/Aws_restJson1.ts b/clients/client-mediatailor/src/protocols/Aws_restJson1.ts index 41fa0bb27770..2fb29bd53cc5 100644 --- a/clients/client-mediatailor/src/protocols/Aws_restJson1.ts +++ b/clients/client-mediatailor/src/protocols/Aws_restJson1.ts @@ -1354,6 +1354,8 @@ export const serializeAws_restJson1UpdateChannelCommand = async ( } let body: any; body = JSON.stringify({ + ...(input.FillerSlate !== undefined && + input.FillerSlate !== null && { FillerSlate: serializeAws_restJson1SlateSource(input.FillerSlate, context) }), ...(input.Outputs !== undefined && input.Outputs !== null && { Outputs: serializeAws_restJson1RequestOutputs(input.Outputs, context) }), }); diff --git a/clients/client-mwaa/README.md b/clients/client-mwaa/README.md index 600f9d153754..203b3643b8ca 100644 --- a/clients/client-mwaa/README.md +++ b/clients/client-mwaa/README.md @@ -11,6 +11,90 @@ AWS SDK for JavaScript MWAA Client for Node.js, Browser and React Native.

              This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.

              +

              +Endpoints +

              + + +

              +Regions +

              +

              For a list of regions that Amazon MWAA supports, see Region availability in the Amazon MWAA User Guide.

              + ## Installing To install this package, simply type add or install @aws-sdk/client-mwaa diff --git a/clients/client-mwaa/src/MWAA.ts index a29354c49fea..e9397252a050 100644 --- a/clients/client-mwaa/src/MWAA.ts +++ b/clients/client-mwaa/src/MWAA.ts @@ -56,10 +56,94 @@ import { MWAAClient } from "./MWAAClient"; /** * Amazon Managed Workflows for Apache Airflow *

              This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.

              + * + *

              + * Endpoints + *

              + * + * + *

              + * Regions + *

              + *

              For a list of regions that Amazon MWAA supports, see Region availability in the Amazon MWAA User Guide.

              */ export class MWAA extends MWAAClient { /** - *

              Create a CLI token to use Airflow CLI.

              + *

              Creates a CLI token for the Airflow CLI. To learn more, see Creating an Apache Airflow CLI token.

              */ public createCliToken( args: CreateCliTokenCommandInput, @@ -123,7 +207,7 @@ export class MWAA extends MWAAClient { } /** - *

              Create a JWT token to be used to login to Airflow Web UI with claims based Authentication.

              + *

              Creates a web login token for the Airflow Web UI. To learn more, see Creating an Apache Airflow web login token.

              */ public createWebLoginToken( args: CreateWebLoginTokenCommandInput, @@ -187,7 +271,7 @@ export class MWAA extends MWAAClient { } /** - *

              Retrieves the details of an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              + *

              Describes an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              */ public getEnvironment( args: GetEnvironmentCommandInput, @@ -283,7 +367,8 @@ export class MWAA extends MWAAClient { } /** - * An operation for publishing metrics from the customers to the Ops plane. + *

              + * Internal only. Publishes environment health metrics to Amazon CloudWatch.

              */ public publishMetrics( args: PublishMetricsCommandInput, diff --git a/clients/client-mwaa/src/MWAAClient.ts b/clients/client-mwaa/src/MWAAClient.ts index 7558d75cd1a7..73021ab87a87 100644 --- a/clients/client-mwaa/src/MWAAClient.ts +++ b/clients/client-mwaa/src/MWAAClient.ts @@ -245,6 +245,90 @@ export interface MWAAClientResolvedConfig extends MWAAClientResolvedConfigType { /** * Amazon Managed Workflows for Apache Airflow *

              This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.

              + * + *

              + * Endpoints + *

              + * + * + *

              + * Regions + *

              + *

              For a list of regions that Amazon MWAA supports, see Region availability in the Amazon MWAA User Guide.

              */ export class MWAAClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-mwaa/src/commands/CreateCliTokenCommand.ts b/clients/client-mwaa/src/commands/CreateCliTokenCommand.ts index a8d7024011fa..0b58d410611c 100644 --- a/clients/client-mwaa/src/commands/CreateCliTokenCommand.ts +++ b/clients/client-mwaa/src/commands/CreateCliTokenCommand.ts @@ -22,7 +22,7 @@ export interface CreateCliTokenCommandInput extends CreateCliTokenRequest {} export interface CreateCliTokenCommandOutput extends CreateCliTokenResponse, __MetadataBearer {} /** - *

              Create a CLI token to use Airflow CLI.

              + *

              Creates a CLI token for the Airflow CLI. To learn more, see Creating an Apache Airflow CLI token.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mwaa/src/commands/CreateWebLoginTokenCommand.ts b/clients/client-mwaa/src/commands/CreateWebLoginTokenCommand.ts index 3ff2da11eab4..c3526fd602ec 100644 --- a/clients/client-mwaa/src/commands/CreateWebLoginTokenCommand.ts +++ b/clients/client-mwaa/src/commands/CreateWebLoginTokenCommand.ts @@ -22,7 +22,7 @@ export interface CreateWebLoginTokenCommandInput extends CreateWebLoginTokenRequ export interface CreateWebLoginTokenCommandOutput extends CreateWebLoginTokenResponse, __MetadataBearer {} /** - *

              Create a JWT token to be used to login to Airflow Web UI with claims based Authentication.

              + *

              Creates a web login token for the Airflow Web UI. To learn more, see Creating an Apache Airflow web login token.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mwaa/src/commands/GetEnvironmentCommand.ts b/clients/client-mwaa/src/commands/GetEnvironmentCommand.ts index 411aade2e50f..ea903978735c 100644 --- a/clients/client-mwaa/src/commands/GetEnvironmentCommand.ts +++ b/clients/client-mwaa/src/commands/GetEnvironmentCommand.ts @@ -22,7 +22,7 @@ export interface GetEnvironmentCommandInput extends GetEnvironmentInput {} export interface GetEnvironmentCommandOutput extends GetEnvironmentOutput, __MetadataBearer {} /** - *

              Retrieves the details of an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              + *

              Describes an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mwaa/src/commands/PublishMetricsCommand.ts b/clients/client-mwaa/src/commands/PublishMetricsCommand.ts index fcbf9d790ddd..1708bccfa212 100644 --- a/clients/client-mwaa/src/commands/PublishMetricsCommand.ts +++ b/clients/client-mwaa/src/commands/PublishMetricsCommand.ts @@ -22,7 +22,8 @@ export interface PublishMetricsCommandInput extends PublishMetricsInput {} export interface PublishMetricsCommandOutput extends PublishMetricsOutput, __MetadataBearer {} /** - * An operation for publishing metrics from the customers to the Ops plane. + *

              + * Internal only. Publishes environment health metrics to Amazon CloudWatch.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-mwaa/src/models/models_0.ts b/clients/client-mwaa/src/models/models_0.ts index 2ea8d8118d84..af3721e33a15 100644 --- a/clients/client-mwaa/src/models/models_0.ts +++ b/clients/client-mwaa/src/models/models_0.ts @@ -2,7 +2,7 @@ import { SENSITIVE_STRING } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; /** - *

              Access to the Airflow Web UI or CLI has been Denied. Please follow the MWAA user guide to setup permissions to access the Web UI and CLI functionality.

              + *

              Access to the Apache Airflow Web UI or CLI has been denied due to insufficient permissions. To learn more, see Accessing an Amazon MWAA environment.

              */ export interface AccessDeniedException extends __SmithyException, $MetadataBearer { name: "AccessDeniedException"; @@ -21,7 +21,7 @@ export namespace AccessDeniedException { export interface CreateCliTokenRequest { /** - *

              Create a CLI token request for a MWAA environment.

              + *

              The name of the Amazon MWAA environment. For example, MyMWAAEnvironment.

              */ Name: string | undefined; } @@ -37,12 +37,12 @@ export namespace CreateCliTokenRequest { export interface CreateCliTokenResponse { /** - *

              Create an Airflow CLI login token response for the provided JWT token.

              + *

              An Airflow CLI login token.

              */ CliToken?: string; /** - *

              Create an Airflow CLI login token response for the provided webserver hostname.

              + *

              The Airflow web server hostname for the environment.
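As a sketch of how the two response fields are commonly used together: exchange the short-lived token against the environment's web server. The /aws_mwaa/cli path and Bearer scheme follow the pattern described in the MWAA user guide and are an assumption here, not something defined by this change:

```ts
import { MWAAClient, CreateCliTokenCommand } from "@aws-sdk/client-mwaa";

const mwaa = new MWAAClient({ region: "us-east-1" });
const { CliToken, WebServerHostname } = await mwaa.send(
  new CreateCliTokenCommand({ Name: "MyMWAAEnvironment" }) // placeholder environment name
);

// Node 18+ global fetch; endpoint shape assumed from the MWAA user guide.
const res = await fetch(`https://${WebServerHostname}/aws_mwaa/cli`, {
  method: "POST",
  headers: {
    Authorization: `Bearer ${CliToken}`,
    "Content-Type": "text/plain",
  },
  body: "dags list", // an Airflow CLI command
});
console.log(await res.text());
```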

              */ WebServerHostname?: string; } @@ -84,16 +84,16 @@ export enum LoggingLevel { } /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              Enables the Apache Airflow log type (e.g. DagProcessingLogs) and defines the log level to send to CloudWatch Logs (e.g. INFO).

              */ export interface ModuleLoggingConfigurationInput { /** - *

              Indicates whether to enable the Apache Airflow log type (e.g. DagProcessingLogs) in CloudWatch Logs.

              + *

              Indicates whether to enable the Apache Airflow log type (e.g. DagProcessingLogs).

              */ Enabled: boolean | undefined; /** - *

              Defines the Apache Airflow logs to send for the log type (e.g. DagProcessingLogs) to CloudWatch Logs. Valid values: CRITICAL, ERROR, WARNING, INFO.

              + *

              Defines the Apache Airflow log level (e.g. INFO) to send to CloudWatch Logs.

              */ LogLevel: LoggingLevel | string | undefined; } @@ -108,31 +108,31 @@ export namespace ModuleLoggingConfigurationInput { } /** - *

              Defines the Apache Airflow logs to send to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              + *

              Defines the Apache Airflow log types to send to CloudWatch Logs.

              */ export interface LoggingConfigurationInput { /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              Publishes Airflow DAG processing logs to CloudWatch Logs.

              */ DagProcessingLogs?: ModuleLoggingConfigurationInput; /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              Publishes Airflow scheduler logs to CloudWatch Logs.

              */ SchedulerLogs?: ModuleLoggingConfigurationInput; /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              Publishes Airflow web server logs to CloudWatch Logs.

              */ WebserverLogs?: ModuleLoggingConfigurationInput; /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              Publishes Airflow worker logs to CloudWatch Logs.

              */ WorkerLogs?: ModuleLoggingConfigurationInput; /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              Publishes Airflow task logs to CloudWatch Logs.
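Taken together, the rewritten descriptions amount to a shape like the following sketch for CreateEnvironment's LoggingConfiguration; which log types to enable and at what level is purely illustrative:

```ts
// Sketch: enable DAG-processing and task logs, keep worker logs disabled.
const loggingConfiguration = {
  DagProcessingLogs: { Enabled: true, LogLevel: "INFO" },
  TaskLogs: { Enabled: true, LogLevel: "WARNING" },
  WorkerLogs: { Enabled: false, LogLevel: "ERROR" },
};
```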

              */ TaskLogs?: ModuleLoggingConfigurationInput; } @@ -147,16 +147,16 @@ export namespace LoggingConfigurationInput { } /** - *

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              + *

              Describes the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              */ export interface NetworkConfiguration { /** - *

              A list of 2 subnet IDs. Required to create an environment. Must be private subnets in two different availability zones. A subnet must be attached to the same VPC as the security group.

              + *

              A list of subnet IDs. To learn more, see About networking on Amazon MWAA.

              */ SubnetIds?: string[]; /** - *

              A list of 1 or more security group IDs. Accepts up to 5 security group IDs. A security group must be attached to the same VPC as the subnets. To learn more, see Security in your VPC on Amazon MWAA.

              + *

              A list of security group IDs. To learn more, see Security in your VPC on Amazon MWAA.
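For example, a NetworkConfiguration matching the earlier wording's requirement of two private subnets in different Availability Zones plus at least one security group might look like this sketch (all IDs are placeholders):

```ts
const networkConfiguration = {
  SubnetIds: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"], // two private subnets
  SecurityGroupIds: ["sg-0123456789abcdef0"], // attached to the same VPC as the subnets
};
```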

              */ SecurityGroupIds?: string[]; } @@ -185,7 +185,7 @@ export interface CreateEnvironmentInput { Name: string | undefined; /** - *

              The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an AWS Identity and Access Management (IAM) role that grants MWAA permission to access AWS services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              + *

              The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an Amazon Web Services Identity and Access Management (IAM) role that grants MWAA permission to access Amazon Web Services services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              */ ExecutionRoleArn: string | undefined; @@ -200,7 +200,7 @@ export interface CreateEnvironmentInput { DagS3Path: string | undefined; /** - *

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              + *

              The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              */ NetworkConfiguration: NetworkConfiguration | undefined; @@ -240,32 +240,27 @@ export interface CreateEnvironmentInput { MaxWorkers?: number; /** - *

              The AWS Key Management Service (KMS) key to encrypt the data in your environment. You can use an AWS owned CMK, or a Customer managed CMK (advanced). To learn more, see Get started with Amazon Managed Workflows for Apache Airflow.

              + *

              The Amazon Web Services Key Management Service (KMS) key to encrypt the data in your environment. You can use an Amazon Web Services owned CMK, or a Customer managed CMK (advanced). To learn more, see Create an Amazon MWAA environment.

              */ KmsKey?: string; /** - *

              The Apache Airflow version for your environment. For example, v1.10.12. If no value is specified, defaults to the latest version. Valid values: v1.10.12.

              + *

              The Apache Airflow version for your environment. If no value is specified, defaults to the latest version. Valid values: 1.10.12, 2.0.2. To learn more, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (MWAA).

              */ AirflowVersion?: string; /** - *

              Defines the Apache Airflow logs to send to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              + *

              Defines the Apache Airflow logs to send to CloudWatch Logs.

              */ LoggingConfiguration?: LoggingConfigurationInput; /** - *

              The day and time of the week to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only. Supported input includes the following:

- * MON|TUE|WED|THU|FRI|SAT|SUN:([01]\\d|2[0-3]):(00|30)
+ *

              The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

              */ WeeklyMaintenanceWindowStart?: string; /** - *

              The key-value tag pairs you want to associate to your environment. For example, "Environment": "Staging". To learn more, see Tagging AWS resources.

              + *

              The key-value tag pairs you want to associate to your environment. For example, "Environment": "Staging". To learn more, see Tagging Amazon Web Services resources.

              */ Tags?: { [key: string]: string }; @@ -280,7 +275,15 @@ export interface CreateEnvironmentInput { MinWorkers?: number; /** - *

              The number of Apache Airflow schedulers to run in your environment.

              + *

              The number of Apache Airflow schedulers to run in your environment. Valid values:

+ * v2.0.2 - Accepts between 2 to 5. Defaults to 2.
+ * v1.10.12 - Accepts 1.
              */ Schedulers?: number; } @@ -349,7 +352,7 @@ export namespace ValidationException { export interface CreateWebLoginTokenRequest { /** - *
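The updated CreateEnvironmentInput doc strings above (NetworkConfiguration, the 1.10.12/2.0.2 AirflowVersion values, and the new Schedulers guidance) all come together in a single CreateEnvironment call. A minimal sketch using the @aws-sdk/client-mwaa client; the ARNs, bucket, subnet, and security group IDs below are placeholders, not values from this change.

```ts
import { MWAAClient, CreateEnvironmentCommand } from "@aws-sdk/client-mwaa";

const client = new MWAAClient({ region: "us-east-1" });

// Placeholder ARNs and IDs; substitute resources from your own account.
const response = await client.send(
  new CreateEnvironmentCommand({
    Name: "MyMWAAEnvironment",
    ExecutionRoleArn: "arn:aws:iam::123456789012:role/my-execution-role",
    SourceBucketArn: "arn:aws:s3:::my-airflow-bucket",
    DagS3Path: "dags",
    // Private subnets in two Availability Zones, plus security groups in the same VPC.
    NetworkConfiguration: {
      SubnetIds: ["subnet-0a1b2c3d4e5f67890", "subnet-0f9e8d7c6b5a43210"],
      SecurityGroupIds: ["sg-0123456789abcdef0"],
    },
    AirflowVersion: "2.0.2", // valid values per the doc change: 1.10.12, 2.0.2
    Schedulers: 2, // 2-5 for v2.0.2, 1 for v1.10.12
    WeeklyMaintenanceWindowStart: "TUE:03:30", // UTC, 30-minute increments
  })
);
console.log(response.Arn);
```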

              Create an Airflow Web UI login token request for a MWAA environment.

              + *

              The name of the Amazon MWAA environment. For example, MyMWAAEnvironment.

              */ Name: string | undefined; } @@ -365,12 +368,12 @@ export namespace CreateWebLoginTokenRequest { export interface CreateWebLoginTokenResponse { /** - *

              Create an Airflow Web UI login token response for the provided JWT token.

              + *

              An Airflow web server login token.

              */ WebToken?: string; /** - *

              Create an Airflow Web UI login token response for the provided webserver hostname.

              + *

              The Airflow web server hostname for the environment.

              */ WebServerHostname?: string; } @@ -429,7 +432,7 @@ export namespace GetEnvironmentInput { } /** - *
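The reworded CreateWebLoginTokenResponse fields can be exercised with a short sketch. The environment name is a placeholder, and the login URL shape is the commonly documented MWAA pattern rather than anything defined in this diff.

```ts
import { MWAAClient, CreateWebLoginTokenCommand } from "@aws-sdk/client-mwaa";

const client = new MWAAClient({ region: "us-east-1" });

// "MyMWAAEnvironment" is a placeholder environment name.
const { WebToken, WebServerHostname } = await client.send(
  new CreateWebLoginTokenCommand({ Name: "MyMWAAEnvironment" })
);

// The token is presented to the web server's single-sign-on endpoint to open the
// Airflow UI; here we only assemble and print the pieces the API returns.
console.log(`https://${WebServerHostname}/aws_mwaa/aws-console-sso?login=true#${WebToken}`);
```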

              An object containing the error encountered with the last update: ErrorCode, ErrorMessage.

              + *

              Describes the error(s) encountered with the last update of the environment.

              */ export interface UpdateError { /** @@ -459,11 +462,11 @@ export enum UpdateStatus { } /** - *

              The status of the last update on the environment, and any errors that were encountered.

              + *

              Describes the status of the last update on the environment, and any errors that were encountered.

              */ export interface LastUpdate { /** - *

              The status of the last update on the environment. Valid values: SUCCESS, PENDING, FAILED.

              + *

              The status of the last update on the environment.

              */ Status?: UpdateStatus | string; @@ -476,6 +479,11 @@ export interface LastUpdate { *

              The error that was encountered during the last update of the environment.

              */ Error?: UpdateError; + + /** + *

              The source of the last update to the environment. Includes internal processes by Amazon MWAA, such as an environment maintenance update.

              + */ + Source?: string; } export namespace LastUpdate { @@ -488,16 +496,16 @@ export namespace LastUpdate { } /** - *
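The new LastUpdate.Source field is read back through GetEnvironment. A hedged sketch; the environment name is a placeholder.

```ts
import { MWAAClient, GetEnvironmentCommand } from "@aws-sdk/client-mwaa";

const client = new MWAAClient({ region: "us-east-1" });

// "MyMWAAEnvironment" is a placeholder name.
const { Environment } = await client.send(
  new GetEnvironmentCommand({ Name: "MyMWAAEnvironment" })
);

// LastUpdate now also carries Source, which can distinguish customer-initiated
// updates from maintenance updates applied by Amazon MWAA itself.
console.log(Environment?.LastUpdate?.Status, Environment?.LastUpdate?.Source);
```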

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              Describes the Apache Airflow log details for the log type (e.g. DagProcessingLogs).

              */ export interface ModuleLoggingConfiguration { /** - *

              Indicates whether to enable the Apache Airflow log type (e.g. DagProcessingLogs) in CloudWatch Logs.

              + *

              Indicates whether the Apache Airflow log type (e.g. DagProcessingLogs) is enabled.

              */ Enabled?: boolean; /** - *

              Defines the Apache Airflow logs to send for the log type (e.g. DagProcessingLogs) to CloudWatch Logs. Valid values: CRITICAL, ERROR, WARNING, INFO.

              + *

              The Apache Airflow log level for the log type (e.g. DagProcessingLogs).

              */ LogLevel?: LoggingLevel | string; @@ -517,31 +525,31 @@ export namespace ModuleLoggingConfiguration { } /** - *

              Defines the Apache Airflow logs to send to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              + *

              Describes the Apache Airflow log types that are published to CloudWatch Logs.

              */ export interface LoggingConfiguration { /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              The Airflow DAG processing logs published to CloudWatch Logs and the log level.

              */ DagProcessingLogs?: ModuleLoggingConfiguration; /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              The Airflow scheduler logs published to CloudWatch Logs and the log level.

              */ SchedulerLogs?: ModuleLoggingConfiguration; /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              The Airflow web server logs published to CloudWatch Logs and the log level.

              */ WebserverLogs?: ModuleLoggingConfiguration; /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              The Airflow worker logs published to CloudWatch Logs and the log level.

              */ WorkerLogs?: ModuleLoggingConfiguration; /** - *

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              + *

              The Airflow task logs published to CloudWatch Logs and the log level.

              */ TaskLogs?: ModuleLoggingConfiguration; } @@ -567,7 +575,7 @@ export enum EnvironmentStatus { } /** - *

              The Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              + *

              Describes an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              */ export interface Environment { /** @@ -631,7 +639,7 @@ export interface Environment { WebserverUrl?: string; /** - *

              The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access AWS resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              + *

              The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              */ ExecutionRoleArn?: string; @@ -641,12 +649,12 @@ export interface Environment { ServiceRoleArn?: string; /** - *

              The Key Management Service (KMS) encryption key used to encrypt the data in your environment.

              + *

              The Amazon Web Services Key Management Service (KMS) encryption key used to encrypt the data in your environment.

              */ KmsKey?: string; /** - *

              The Apache Airflow version on your environment. For example, v1.10.12.

              + *

              The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2.

              */ AirflowVersion?: string; @@ -696,27 +704,27 @@ export interface Environment { MaxWorkers?: number; /** - *

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              + *

              Describes the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              */ NetworkConfiguration?: NetworkConfiguration; /** - *

              The Apache Airflow logs being sent to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              + *

              The Apache Airflow logs published to CloudWatch Logs.

              */ LoggingConfiguration?: LoggingConfiguration; /** - *

              The status of the last update on the environment, and any errors that were encountered.

              + *

              The status of the last update on the environment.

              */ LastUpdate?: LastUpdate; /** - *

              The day and time of the week that weekly maintenance updates are scheduled. For example: TUE:03:30.

              + *

              The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30.

              */ WeeklyMaintenanceWindowStart?: string; /** - *

              The key-value tag pairs associated to your environment. For example, "Environment": "Staging". To learn more, see Tagging AWS resources.

              + *

              The key-value tag pairs associated to your environment. For example, "Environment": "Staging". To learn more, see Tagging Amazon Web Services resources.

              */ Tags?: { [key: string]: string }; @@ -784,7 +792,7 @@ export namespace ListEnvironmentsInput { export interface ListEnvironmentsOutput { /** - *

              Returns the list of Amazon MWAA environments.

              + *

              Returns a list of Amazon MWAA environments.

              */ Environments: string[] | undefined; @@ -821,7 +829,7 @@ export namespace ListTagsForResourceInput { export interface ListTagsForResourceOutput { /** - *

              The key-value tag pairs associated to your environment. To learn more, see Tagging AWS resources.

              + *

              The key-value tag pairs associated to your environment. To learn more, see Tagging Amazon Web Services resources.

              */ Tags?: { [key: string]: string }; } @@ -836,16 +844,19 @@ export namespace ListTagsForResourceOutput { } /** - *

              Internal only API.

              + *

              + * Internal only. Represents the dimensions of a metric. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

              */ export interface Dimension { /** - *

              Internal only API.

              + *

              + * Internal only. The name of the dimension.

              */ Name: string | undefined; /** - *

              Internal only API.

              + *

              + * Internal only. The value of the dimension.

              */ Value: string | undefined; } @@ -860,26 +871,31 @@ export namespace Dimension { } /** - *

              Internal only API.

              + *

              + * Internal only. Represents a set of statistics that describe a specific metric. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

              */ export interface StatisticSet { /** - *

              Internal only API.

              + *

              + * Internal only. The number of samples used for the statistic set.

              */ SampleCount?: number; /** - *

              Internal only API.

              + *

              + * Internal only. The sum of values for the sample set.

              */ Sum?: number; /** - *

              Internal only API.

              + *

              + * Internal only. The minimum value of the sample set.

              */ Minimum?: number; /** - *

              Internal only API.

              + *

              + * Internal only. The maximum value of the sample set.

              */ Maximum?: number; } @@ -924,36 +940,43 @@ export enum Unit { } /** - *

              Internal only API.

              + *

              + * Internal only. Collects Apache Airflow metrics. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

              */ export interface MetricDatum { /** - *

              Internal only API.

              + *

              + * Internal only. The name of the metric.

              */ MetricName: string | undefined; /** - *

              Internal only API.

              + *

              + * Internal only. The time the metric data was received.

              */ Timestamp: Date | undefined; /** - *

              Internal only API.

              + *

              + * Internal only. The dimensions associated with the metric.

              */ Dimensions?: Dimension[]; /** - *

              Internal only API.

              + *

              + * Internal only. The value for the metric.

              */ Value?: number; /** - * Unit + *

              + * Internal only. The unit used to store the metric.

              */ Unit?: Unit | string; /** - *

              Internal only API.

              + *

              + * Internal only. The statistical values for the metric.

              */ StatisticValues?: StatisticSet; } @@ -969,12 +992,14 @@ export namespace MetricDatum { export interface PublishMetricsInput { /** - *

              Publishes environment metric data to Amazon CloudWatch.

              + *

              + * Internal only. The name of the environment.

              */ EnvironmentName: string | undefined; /** - *

              Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metrica.

              + *

              + * Internal only. Publishes metrics to Amazon CloudWatch. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

              */ MetricData: MetricDatum[] | undefined; } @@ -1006,7 +1031,7 @@ export interface TagResourceInput { ResourceArn: string | undefined; /** - *

              The key-value tag pairs you want to associate to your environment. For example, "Environment": "Staging". To learn more, see Tagging AWS resources.

              + *

              The key-value tag pairs you want to associate to your environment. For example, "Environment": "Staging". To learn more, see Tagging Amazon Web Services resources.

              */ Tags: { [key: string]: string } | undefined; } @@ -1064,11 +1089,11 @@ export namespace UntagResourceOutput { } /** - *

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              + *

              Defines the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              */ export interface UpdateNetworkConfigurationInput { /** - *

              A list of 1 or more security group IDs. Accepts up to 5 security group IDs. A security group must be attached to the same VPC as the subnets. To learn more, see Security in your VPC on Amazon MWAA.

              + *

              A list of security group IDs. A security group must be attached to the same VPC as the subnets. To learn more, see Security in your VPC on Amazon MWAA.

              */ SecurityGroupIds: string[] | undefined; } @@ -1089,12 +1114,12 @@ export interface UpdateEnvironmentInput { Name: string | undefined; /** - *

              The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access AWS resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              + *

              The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              */ ExecutionRoleArn?: string; /** - *

              The Apache Airflow version for your environment. For example, v1.10.12. If no value is specified, defaults to the latest version. Valid values: v1.10.12.

              + *

              The Apache Airflow version for your environment. If no value is specified, defaults to the latest version. Valid values: 1.10.12, 2.0.2.

              */ AirflowVersion?: string; @@ -1144,22 +1169,17 @@ export interface UpdateEnvironmentInput { MaxWorkers?: number; /** - *

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              + *

              The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              */ NetworkConfiguration?: UpdateNetworkConfigurationInput; /** - *

              Defines the Apache Airflow logs to send to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              + *

              The Apache Airflow log types to send to CloudWatch Logs.

              */ LoggingConfiguration?: LoggingConfigurationInput; /** - *

              The day and time of the week to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only. Supported input includes the following:

- * MON|TUE|WED|THU|FRI|SAT|SUN:([01]\\d|2[0-3]):(00|30)
+ *

              The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

              */ WeeklyMaintenanceWindowStart?: string; diff --git a/clients/client-mwaa/src/protocols/Aws_restJson1.ts b/clients/client-mwaa/src/protocols/Aws_restJson1.ts index d76afe94a8d7..1f47125fc371 100644 --- a/clients/client-mwaa/src/protocols/Aws_restJson1.ts +++ b/clients/client-mwaa/src/protocols/Aws_restJson1.ts @@ -1610,6 +1610,7 @@ const deserializeAws_restJson1LastUpdate = (output: any, context: __SerdeContext output.Error !== undefined && output.Error !== null ? deserializeAws_restJson1UpdateError(output.Error, context) : undefined, + Source: __expectString(output.Source), Status: __expectString(output.Status), } as any; }; diff --git a/clients/client-opensearch/src/models/models_0.ts b/clients/client-opensearch/src/models/models_0.ts index f2da212532ea..951cc153ebd5 100644 --- a/clients/client-opensearch/src/models/models_0.ts +++ b/clients/client-opensearch/src/models/models_0.ts @@ -652,6 +652,16 @@ export interface AdvancedSecurityOptions { *

              Describes the SAML application configured for a domain.

              */ SAMLOptions?: SAMLOptionsOutput; + + /** + *

              Specifies the Anonymous Auth Disable Date when Anonymous Auth is enabled.

              + */ + AnonymousAuthDisableDate?: Date; + + /** + *

              True if Anonymous auth is enabled. Anonymous auth can be enabled only when AdvancedSecurity is enabled on existing domains.

              + */ + AnonymousAuthEnabled?: boolean; } export namespace AdvancedSecurityOptions { @@ -772,6 +782,11 @@ export interface AdvancedSecurityOptionsInput { *

              The SAML application configuration for the domain.

              */ SAMLOptions?: SAMLOptionsInput; + + /** + *

              True if Anonymous auth is enabled. Anonymous auth can be enabled only when AdvancedSecurity is enabled on existing domains.

              + */ + AnonymousAuthEnabled?: boolean; } export namespace AdvancedSecurityOptionsInput { diff --git a/clients/client-opensearch/src/protocols/Aws_restJson1.ts b/clients/client-opensearch/src/protocols/Aws_restJson1.ts index aa6912cb316a..b9c69fd28f35 100644 --- a/clients/client-opensearch/src/protocols/Aws_restJson1.ts +++ b/clients/client-opensearch/src/protocols/Aws_restJson1.ts @@ -5005,6 +5005,8 @@ const serializeAws_restJson1AdvancedSecurityOptionsInput = ( context: __SerdeContext ): any => { return { + ...(input.AnonymousAuthEnabled !== undefined && + input.AnonymousAuthEnabled !== null && { AnonymousAuthEnabled: input.AnonymousAuthEnabled }), ...(input.Enabled !== undefined && input.Enabled !== null && { Enabled: input.Enabled }), ...(input.InternalUserDatabaseEnabled !== undefined && input.InternalUserDatabaseEnabled !== null && { InternalUserDatabaseEnabled: input.InternalUserDatabaseEnabled }), @@ -5440,6 +5442,11 @@ const deserializeAws_restJson1AdvancedSecurityOptions = ( context: __SerdeContext ): AdvancedSecurityOptions => { return { + AnonymousAuthDisableDate: + output.AnonymousAuthDisableDate !== undefined && output.AnonymousAuthDisableDate !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.AnonymousAuthDisableDate))) + : undefined, + AnonymousAuthEnabled: __expectBoolean(output.AnonymousAuthEnabled), Enabled: __expectBoolean(output.Enabled), InternalUserDatabaseEnabled: __expectBoolean(output.InternalUserDatabaseEnabled), SAMLOptions: diff --git a/clients/client-quicksight/src/QuickSight.ts b/clients/client-quicksight/src/QuickSight.ts index 9e9d99f8a22e..9f365a49141b 100644 --- a/clients/client-quicksight/src/QuickSight.ts +++ b/clients/client-quicksight/src/QuickSight.ts @@ -693,7 +693,7 @@ export class QuickSight extends QuickSightClient { } /** - *
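The new AnonymousAuthEnabled flag on AdvancedSecurityOptionsInput is passed through UpdateDomainConfig, and AnonymousAuthDisableDate comes back on the resulting config. A sketch assuming a placeholder domain that already has fine-grained access control (advanced security) enabled.

```ts
import { OpenSearchClient, UpdateDomainConfigCommand } from "@aws-sdk/client-opensearch";

const client = new OpenSearchClient({ region: "us-east-1" });

// "my-domain" is a placeholder. Anonymous auth can only be toggled on an
// existing domain that already has advanced security enabled.
const response = await client.send(
  new UpdateDomainConfigCommand({
    DomainName: "my-domain",
    AdvancedSecurityOptions: {
      AnonymousAuthEnabled: true,
    },
  })
);

// The returned config reports the anonymous-auth disable date once it is set.
console.log(response.DomainConfig?.AdvancedSecurityOptions?.Options?.AnonymousAuthDisableDate);
```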

              Creates a dataset.

              + *

              Creates a dataset. This operation doesn't support datasets that include uploaded files as a source.

              */ public createDataSet( args: CreateDataSetCommandInput, @@ -915,7 +915,10 @@ export class QuickSight extends QuickSightClient { } /** - *

              Creates and starts a new SPICE ingestion on a dataset

              + *

              Creates and starts a new SPICE ingestion for a dataset. You can manually refresh datasets in + * an Enterprise edition account 32 times in a 24-hour period. You can manually refresh + * datasets in a Standard edition account 8 times in a 24-hour period. Each 24-hour period + * is measured starting 24 hours before the current date and time.

              * *

              Any ingestions operating on tagged datasets inherit the same tags automatically for use in * access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using @@ -1914,7 +1917,7 @@ export class QuickSight extends QuickSightClient { } /** - *
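A sketch of the manual refresh the updated CreateIngestion text describes; the account and dataset IDs are placeholders, and the ingestion ID only needs to be unique per dataset.

```ts
import { QuickSightClient, CreateIngestionCommand } from "@aws-sdk/client-quicksight";
import { randomUUID } from "crypto";

const client = new QuickSightClient({ region: "us-east-1" });

// Account and dataset IDs are placeholders. Manual refreshes count against the
// 32-per-24-hours (Enterprise) or 8-per-24-hours (Standard) quota described above.
const response = await client.send(
  new CreateIngestionCommand({
    AwsAccountId: "123456789012",
    DataSetId: "my-dataset-id",
    IngestionId: randomUUID(),
  })
);
console.log(response.IngestionStatus, response.Arn);
```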

              Describes a dataset.

              + *

              Describes a dataset. This operation doesn't support datasets that include uploaded files as a source.

              */ public describeDataSet( args: DescribeDataSetCommandInput, @@ -3889,7 +3892,7 @@ export class QuickSight extends QuickSightClient { } /** - *

              Updates a dataset.

              + *

              Updates a dataset. This operation doesn't support datasets that include uploaded files as a source.

              */ public updateDataSet( args: UpdateDataSetCommandInput, diff --git a/clients/client-quicksight/src/commands/CreateDataSetCommand.ts b/clients/client-quicksight/src/commands/CreateDataSetCommand.ts index 8af08ad37e40..df81002824a1 100644 --- a/clients/client-quicksight/src/commands/CreateDataSetCommand.ts +++ b/clients/client-quicksight/src/commands/CreateDataSetCommand.ts @@ -22,7 +22,7 @@ export interface CreateDataSetCommandInput extends CreateDataSetRequest {} export interface CreateDataSetCommandOutput extends CreateDataSetResponse, __MetadataBearer {} /** - *

              Creates a dataset.

              + *

              Creates a dataset. This operation doesn't support datasets that include uploaded files as a source.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-quicksight/src/commands/CreateIngestionCommand.ts b/clients/client-quicksight/src/commands/CreateIngestionCommand.ts index aa6138a5f4b3..ff014bd13134 100644 --- a/clients/client-quicksight/src/commands/CreateIngestionCommand.ts +++ b/clients/client-quicksight/src/commands/CreateIngestionCommand.ts @@ -22,7 +22,10 @@ export interface CreateIngestionCommandInput extends CreateIngestionRequest {} export interface CreateIngestionCommandOutput extends CreateIngestionResponse, __MetadataBearer {} /** - *

              Creates and starts a new SPICE ingestion on a dataset

              + *

              Creates and starts a new SPICE ingestion for a dataset. You can manually refresh datasets in + * an Enterprise edition account 32 times in a 24-hour period. You can manually refresh + * datasets in a Standard edition account 8 times in a 24-hour period. Each 24-hour period + * is measured starting 24 hours before the current date and time.

              * *

              Any ingestions operating on tagged datasets inherit the same tags automatically for use in * access control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using diff --git a/clients/client-quicksight/src/commands/DescribeDataSetCommand.ts b/clients/client-quicksight/src/commands/DescribeDataSetCommand.ts index 10e0cfdf0d22..412fad4ca132 100644 --- a/clients/client-quicksight/src/commands/DescribeDataSetCommand.ts +++ b/clients/client-quicksight/src/commands/DescribeDataSetCommand.ts @@ -22,7 +22,7 @@ export interface DescribeDataSetCommandInput extends DescribeDataSetRequest {} export interface DescribeDataSetCommandOutput extends DescribeDataSetResponse, __MetadataBearer {} /** - *

              Describes a dataset.

              + *

              Describes a dataset. This operation doesn't support datasets that include uploaded files as a source.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-quicksight/src/commands/UpdateDataSetCommand.ts b/clients/client-quicksight/src/commands/UpdateDataSetCommand.ts index 35d3d75c95a6..5cb84fa69915 100644 --- a/clients/client-quicksight/src/commands/UpdateDataSetCommand.ts +++ b/clients/client-quicksight/src/commands/UpdateDataSetCommand.ts @@ -22,7 +22,7 @@ export interface UpdateDataSetCommandInput extends UpdateDataSetRequest {} export interface UpdateDataSetCommandOutput extends UpdateDataSetResponse, __MetadataBearer {} /** - *

              Updates a dataset.

              + *

              Updates a dataset. This operation doesn't support datasets that include uploaded files as a source.

              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rds/src/RDS.ts b/clients/client-rds/src/RDS.ts index 7e03bebea4b7..fa1cf202f468 100644 --- a/clients/client-rds/src/RDS.ts +++ b/clients/client-rds/src/RDS.ts @@ -1340,7 +1340,7 @@ export class RDS extends RDSClient { * RU/RUR.

              *

              Amazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software. * The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create - * your database. With Amazon RDS Custom, you upload your database installation files in Amazon S3.

              + * your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.

              *

              When you create a custom engine version, you specify the files in a JSON document called a CEV manifest. * This document describes installation .zip files stored in Amazon S3. RDS Custom creates your CEV from * the installation files that you provided. This service model is called Bring Your Own Media (BYOM).

              diff --git a/clients/client-rds/src/commands/CreateCustomDBEngineVersionCommand.ts b/clients/client-rds/src/commands/CreateCustomDBEngineVersionCommand.ts index 400089d977f8..b96e93965995 100644 --- a/clients/client-rds/src/commands/CreateCustomDBEngineVersionCommand.ts +++ b/clients/client-rds/src/commands/CreateCustomDBEngineVersionCommand.ts @@ -27,7 +27,7 @@ export interface CreateCustomDBEngineVersionCommandOutput extends DBEngineVersio * RU/RUR.

              *

              Amazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software. * The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create - * your database. With Amazon RDS Custom, you upload your database installation files in Amazon S3.

              + * your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.

              *

              When you create a custom engine version, you specify the files in a JSON document called a CEV manifest. * This document describes installation .zip files stored in Amazon S3. RDS Custom creates your CEV from * the installation files that you provided. This service model is called Bring Your Own Media (BYOM).

              diff --git a/clients/client-rds/src/models/models_0.ts b/clients/client-rds/src/models/models_0.ts index 671e85381fdf..a97ceee30d22 100644 --- a/clients/client-rds/src/models/models_0.ts +++ b/clients/client-rds/src/models/models_0.ts @@ -1750,7 +1750,24 @@ export interface DBClusterSnapshot { AllocatedStorage?: number; /** - *

              Specifies the status of this DB cluster snapshot.

              + *

              Specifies the status of this DB cluster snapshot. Valid statuses are the following:

+ * available
+ * copying
+ * creating
              */ Status?: string; @@ -3200,7 +3217,7 @@ export interface CreateCustomDBEngineVersionMessage { /** *

              The name of your CEV. The name format is 19.customized_string * . For example, - * a valid name is 19.my_cev1. This setting is required for RDS Custom, but optional for Amazon RDS. + * a valid name is 19.my_cev1. This setting is required for RDS Custom for Oracle, but optional for Amazon RDS. * The combination of Engine and EngineVersion is unique per customer per Region.

              */ EngineVersion: string | undefined; @@ -3767,7 +3784,7 @@ export interface CreateDBClusterMessage { *
            • *
            • *

              - * aurora-mysql (for MySQL 5.7-compatible Aurora)

              + * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)

              *
            • *
            • *

              @@ -3795,7 +3812,7 @@ export interface CreateDBClusterMessage { *

              * aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" *

              - *

              To list all of the available engine versions for MySQL 5.7-compatible Aurora, use the following command:

              + *

              To list all of the available engine versions for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora, use the following command:

              *

              * aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" *

              @@ -4203,15 +4220,13 @@ export interface CreateDBClusterMessage { *

              Specifies the storage type to be associated with the DB cluster.

              *

              This setting is required to create a Multi-AZ DB cluster.

              *

- * Valid values: standard | gp2 | io1
+ * Valid values: io1

- * If you specify io1, also include a value for the Iops parameter.
+ * When specified, a value for the Iops parameter is required.

- * Default: io1 if the Iops parameter is specified, otherwise gp2
+ * Default: io1

              *

              Valid for: Multi-AZ DB clusters only

              */ @@ -4964,7 +4979,7 @@ export interface DBCluster { DBClusterInstanceClass?: string; /** - *
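The tightened StorageType text for Multi-AZ DB clusters (io1 only, with Iops required) translates into a CreateDBCluster call like the following sketch; identifiers, instance class, and credentials are placeholders.

```ts
import { RDSClient, CreateDBClusterCommand } from "@aws-sdk/client-rds";

const client = new RDSClient({ region: "us-east-1" });

// For a Multi-AZ DB cluster, io1 is the only storage type, so Iops
// (and AllocatedStorage) must be supplied alongside it.
const response = await client.send(
  new CreateDBClusterCommand({
    DBClusterIdentifier: "my-multiaz-cluster",
    Engine: "mysql",
    DBClusterInstanceClass: "db.m6gd.large",
    StorageType: "io1",
    Iops: 1000,
    AllocatedStorage: 100,
    MasterUsername: "admin",
    MasterUserPassword: "change-me-please",
  })
);
console.log(response.DBCluster?.Status);
```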

              The storage type associated with DB instance.

              + *

              The storage type associated with the DB cluster.

              *

              This setting is only for non-Aurora Multi-AZ DB clusters.

              */ StorageType?: string; @@ -5515,7 +5530,7 @@ export interface CreateDBClusterParameterGroupMessage { *

              * Aurora MySQL *

              - *

              Example: aurora5.6, aurora-mysql5.7 + *

              Example: aurora5.6, aurora-mysql5.7, aurora-mysql8.0 *

              *

              * Aurora PostgreSQL @@ -5551,7 +5566,7 @@ export interface CreateDBClusterParameterGroupMessage { *

            • *
            • *

              - * aurora-mysql (for MySQL 5.7-compatible Aurora)

              + * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)

              *
            • *
            • *

              @@ -5789,7 +5804,7 @@ export interface CreateDBInstanceMessage { *

            • *
            *

- * Amazon RDS Custom
+ * Amazon RDS Custom for Oracle

            *

            The Oracle System ID (SID) of the created RDS Custom DB instance. * If you don't specify a value, the default value is ORCL. @@ -5809,6 +5824,10 @@ export interface CreateDBInstanceMessage { *

          • *
          *

          + * Amazon RDS Custom for SQL Server + *

          + *

          Not applicable. Must be null.

          + *

          * SQL Server *

          *

          Not applicable. Must be null.

          @@ -5886,10 +5905,12 @@ export interface CreateDBInstanceMessage { *

          *
            *
          • - *

            General Purpose (SSD) storage (gp2): Must be an integer from 40 to 65536.

            + *

General Purpose (SSD) storage (gp2): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.

            *
          • *
          • - *

            Provisioned IOPS storage (io1): Must be an integer from 40 to 65536.

            + *

Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.

            *
          • *
          * @@ -5971,7 +5992,7 @@ export interface CreateDBInstanceMessage { *

          General Purpose (SSD) storage (gp2):

          *
            *
          • - *

            Enterprise and Standard editions: Must be an integer from 200 to 16384.

            + *

            Enterprise and Standard editions: Must be an integer from 20 to 16384.

            *
          • *
          • *

            Web and Express editions: Must be an integer from 20 to 16384.

            @@ -5982,7 +6003,7 @@ export interface CreateDBInstanceMessage { *

            Provisioned IOPS storage (io1):

            *
              *
            • - *

              Enterprise and Standard editions: Must be an integer from 200 to 16384.

              + *

              Enterprise and Standard editions: Must be an integer from 100 to 16384.

              *
            • *
            • *

              Web and Express editions: Must be an integer from 100 to 16384.

              @@ -5993,7 +6014,7 @@ export interface CreateDBInstanceMessage { *

              Magnetic storage (standard):

              *
              */ @@ -6389,18 +6430,24 @@ export interface CreateDBInstanceMessage { * instance is managed by the DB cluster.

              * *

- * Amazon RDS Custom
+ * Amazon RDS Custom for Oracle

              - *

              A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom. The CEV + *

              A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV * name has the following format: 19.customized_string * . An example identifier is * 19.my_cev1. For more information, see - * Creating an RDS Custom DB instance in the Amazon RDS User Guide..

              + * Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide..

              * *

- * MariaDB
+ * Amazon RDS Custom for SQL Server

              + *

              See RDS Custom for SQL Server general requirements + * in the Amazon RDS User Guide. *

              * + *

+ * MariaDB

              *

              For information, see MariaDB on Amazon RDS Versions in the * Amazon RDS User Guide. *

              @@ -6408,7 +6455,6 @@ export interface CreateDBInstanceMessage { *

              * Microsoft SQL Server *

              - * *

              For information, see Microsoft SQL Server Versions on Amazon RDS in the * Amazon RDS User Guide. *

              @@ -6416,7 +6462,6 @@ export interface CreateDBInstanceMessage { *

              * MySQL *

              - * *

              For information, see MySQL on Amazon RDS Versions in the * Amazon RDS User Guide. *

              @@ -6424,7 +6469,6 @@ export interface CreateDBInstanceMessage { *

              * Oracle *

              - * *

              For information, see Oracle Database Engine Release Notes in the * Amazon RDS User Guide. *

              @@ -6432,7 +6476,6 @@ export interface CreateDBInstanceMessage { *

              * PostgreSQL *

              - * *

              For information, see Amazon RDS for PostgreSQL versions and extensions in the * Amazon RDS User Guide. *

              @@ -6565,7 +6608,7 @@ export interface CreateDBInstanceMessage { /** *

              A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.

              - *

              For RDS Custom Oracle instances, either set this parameter to true or leave it unset. + *

              For RDS Custom instances, either set this parameter to true or leave it unset. * If you set this parameter to false, RDS reports an error.

              *

              * Amazon Aurora @@ -6591,8 +6634,8 @@ export interface CreateDBInstanceMessage { *

              * Amazon RDS Custom *

              - *

              A KMS key is required for RDS Custom Oracle instances. For most RDS engines, if you leave this parameter empty - * while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom for Oracle + *

              A KMS key is required for RDS Custom instances. For most RDS engines, if you leave this parameter empty + * while enabling StorageEncrypted, the engine uses the default KMS key. However, RDS Custom * doesn't use the default key when this parameter is empty. You must explicitly specify a key.

              */ KmsKeyId?: string; @@ -8542,7 +8585,7 @@ export interface CreateDBParameterGroupMessage { *
            • *
            • *

              - * aurora-mysql (for MySQL 5.7-compatible Aurora)

              + * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)

              *
            • *
            • *

              @@ -12649,7 +12692,7 @@ export interface DescribeDBEngineVersionsMessage { *

            • *
            • *

              - * aurora-mysql (for MySQL 5.7-compatible Aurora)

              + * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)

              *
            • *
            • *

              diff --git a/clients/client-rds/src/models/models_1.ts b/clients/client-rds/src/models/models_1.ts index b5b3b2b9d5d9..4eefc3dbcbf1 100644 --- a/clients/client-rds/src/models/models_1.ts +++ b/clients/client-rds/src/models/models_1.ts @@ -1913,7 +1913,7 @@ export interface DescribeOrderableDBInstanceOptionsMessage { *

            • *
            • *

              - * aurora-mysql (for MySQL 5.7-compatible Aurora)

              + * aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)

              *
            • *
            • *

              @@ -3676,7 +3676,7 @@ export interface ModifyCustomDBEngineVersionMessage { /** *

              The custom engine version (CEV) that you want to modify. This option is required for - * RDS Custom, but optional for Amazon RDS. The combination of Engine and + * RDS Custom for Oracle, but optional for Amazon RDS. The combination of Engine and * EngineVersion is unique per customer per Amazon Web Services Region.

              */ EngineVersion: string | undefined; @@ -3930,7 +3930,7 @@ export interface ModifyDBClusterMessage { *

              * aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" *

              - *

              To list all of the available engine versions for MySQL 5.7-compatible Aurora, use the following command:

              + *

              To list all of the available engine versions for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora, use the following command:

              *

              * aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" *

              @@ -4064,15 +4064,13 @@ export interface ModifyDBClusterMessage { /** *

              Specifies the storage type to be associated with the DB cluster.

              *

- * Valid values: standard | gp2 | io1
+ * Valid values: io1

- * If you specify io1, you must also include a value for the Iops parameter.
+ * When specified, a value for the Iops parameter is required.

- * Default: io1 if the Iops parameter is specified, otherwise gp2
+ * Default: io1

              *

              Valid for: Multi-AZ DB clusters only

              */ @@ -4440,7 +4438,7 @@ export interface ModifyDBInstanceMessage { * The change is applied during the next maintenance window, * unless ApplyImmediately is enabled for this request. *

              - *

              This setting doesn't apply to RDS Custom.

              + *

              This setting doesn't apply to RDS Custom for Oracle.

              *

              Default: Uses existing setting

              */ DBInstanceClass?: string; @@ -4496,19 +4494,14 @@ export interface ModifyDBInstanceMessage { VpcSecurityGroupIds?: string[]; /** - *

              A value that indicates whether the modifications in this request and - * any pending modifications are asynchronously applied - * as soon as possible, regardless of the - * PreferredMaintenanceWindow setting for the DB instance. By default, this parameter is - * disabled. - *

              + *

              A value that indicates whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, + * regardless of the PreferredMaintenanceWindow setting for the DB instance. By default, this parameter is disabled.

              *

              - * If this parameter is disabled, changes to the - * DB instance are applied during the next maintenance window. Some parameter changes can cause an outage - * and are applied on the next call to RebootDBInstance, or the next failure reboot. - * Review the table of parameters in Modifying a DB Instance - * in the Amazon RDS User Guide. to see the impact of enabling - * or disabling ApplyImmediately for each modified parameter and to determine when the changes are applied. + * If this parameter is disabled, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage + * and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in + * Modifying a DB Instance in the + * Amazon RDS User Guide to see the impact of enabling or disabling ApplyImmediately for each modified parameter and to + * determine when the changes are applied. *

              */ ApplyImmediately?: boolean; @@ -4598,7 +4591,7 @@ export interface ModifyDBInstanceMessage { *
                *
              • *

                It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to - * read replicas. It can't be set to 0 or 35 for an RDS Custom DB instance.

                + * read replicas. It can't be set to 0 or 35 for an RDS Custom for Oracle DB instance.

                *
              • *
              • *

                It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or @@ -4688,7 +4681,7 @@ export interface ModifyDBInstanceMessage { * default minor version if the current minor version is lower. * For information about valid engine versions, see CreateDBInstance, * or call DescribeDBEngineVersions.

                - *

                In RDS Custom, this parameter is supported for read replicas only if they are in the + *

                In RDS Custom for Oracle, this parameter is supported for read replicas only if they are in the * PATCH_DB_FAILURE lifecycle. *

                */ @@ -4950,7 +4943,6 @@ export interface ModifyDBInstanceMessage { *

                *

                Changes to the PubliclyAccessible parameter are applied immediately regardless * of the value of the ApplyImmediately parameter.

                - *

                This setting doesn't apply to RDS Custom.

                */ PubliclyAccessible?: boolean; @@ -5214,17 +5206,20 @@ export interface ModifyDBParameterGroupMessage { /** *

An array of parameter names, values, and the application methods for the parameter update. At least one parameter name, value, and
- * application method method must be supplied; later arguments are optional. A maximum of 20 parameters can be modified in a single request.

                + * application method must be supplied; later arguments are optional. A maximum of 20 parameters can be modified in a single request.

                *

                Valid Values (for the application method): immediate | pending-reboot *

                + *

You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters.

                + *

When the application method is immediate, changes to dynamic parameters are applied immediately to the DB instances associated with the parameter group.

                + *

When the application method is pending-reboot, changes to dynamic and static parameters are applied after a reboot without failover to the DB instances associated with the parameter group.

                * - *

You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic and static parameters.

                - *

When the application method is immediate, changes to dynamic parameters are applied immediately to the DB instances associated with the parameter group. When the application method is pending-reboot, changes to dynamic and static parameters are applied after a reboot without failover to the DB instances associated with the parameter group.

                + *

                You can't use pending-reboot with dynamic parameters on RDS for SQL Server DB instances. Use immediate.

                *
                + *

For more information on modifying DB parameters, see Working with DB parameter groups in the Amazon RDS User Guide.

                */ Parameters: Parameter[] | undefined; } @@ -5825,7 +5820,7 @@ export interface ModifyGlobalClusterMessage { *
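The expanded ApplyMethod guidance maps directly onto ModifyDBParameterGroup input. A sketch with a placeholder parameter group; max_connections is dynamic and innodb_buffer_pool_instances is static, so they illustrate immediate versus pending-reboot.

```ts
import { RDSClient, ModifyDBParameterGroupCommand } from "@aws-sdk/client-rds";

const client = new RDSClient({ region: "us-east-1" });

// Parameter group name and parameter choices are placeholders.
const response = await client.send(
  new ModifyDBParameterGroupCommand({
    DBParameterGroupName: "my-mysql-params",
    Parameters: [
      // Dynamic parameter: can take effect right away.
      { ParameterName: "max_connections", ParameterValue: "250", ApplyMethod: "immediate" },
      // Static parameter: must use pending-reboot and is applied after a reboot.
      { ParameterName: "innodb_buffer_pool_instances", ParameterValue: "8", ApplyMethod: "pending-reboot" },
    ],
  })
);
console.log(response.DBParameterGroupName);
```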

                * aws rds describe-db-engine-versions --engine aurora --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]' *

                - *

                To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

                + *

                To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:

                *

                * aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]' *

                @@ -6821,7 +6816,7 @@ export interface RestoreDBClusterFromS3Message { /** *

                The name of the database engine to be used for this DB cluster.

                - *

                Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible Aurora), and aurora-postgresql + *

                Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql *

                */ Engine: string | undefined; @@ -6832,7 +6827,7 @@ export interface RestoreDBClusterFromS3Message { *

                * aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" *

                - *

                To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

                + *

                To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:

                *

                * aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" *

                @@ -6843,7 +6838,7 @@ export interface RestoreDBClusterFromS3Message { *

                * Aurora MySQL *

                - *

                Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5 + *

                Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5, 8.0.mysql_aurora.3.01.0 *

                *

                * Aurora PostgreSQL @@ -7203,7 +7198,7 @@ export interface RestoreDBClusterFromSnapshotMessage { *

                * aws rds describe-db-engine-versions --engine aurora --query "DBEngineVersions[].EngineVersion" *

                - *

                To list all of the available engine versions for MySQL 5.7-compatible Aurora, use the following command:

                + *

                To list all of the available engine versions for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora, use the following command:

                *

                * aws rds describe-db-engine-versions --engine aurora-mysql --query "DBEngineVersions[].EngineVersion" *

                @@ -7432,15 +7427,13 @@ export interface RestoreDBClusterFromSnapshotMessage { /** *

Specifies the storage type to be associated with each DB instance in the Multi-AZ DB cluster.

                *

- * Valid values: standard | gp2 | io1
+ * Valid values: io1

- * If you specify io1, you must also include a value for the Iops parameter.
+ * When specified, a value for the Iops parameter is required.

- * Default: io1 if the Iops parameter is specified, otherwise gp2
+ * Default: io1

                *

                Valid for: Aurora DB clusters and Multi-AZ DB clusters

                */ @@ -7800,15 +7793,13 @@ export interface RestoreDBClusterToPointInTimeMessage { /** *

Specifies the storage type to be associated with each DB instance in the Multi-AZ DB cluster.

                *

- * Valid values: standard | gp2 | io1
+ * Valid values: io1

- * If you specify io1, also include a value for the Iops parameter.
+ * When specified, a value for the Iops parameter is required.

- * Default: io1 if the Iops parameter is specified, otherwise gp2
+ * Default: io1

                *

                Valid for: Multi-AZ DB clusters only

                */ diff --git a/clients/client-rekognition/src/Rekognition.ts b/clients/client-rekognition/src/Rekognition.ts index 0aa8638a87f6..dfb107d6ce7b 100644 --- a/clients/client-rekognition/src/Rekognition.ts +++ b/clients/client-rekognition/src/Rekognition.ts @@ -2018,7 +2018,7 @@ export class Rekognition extends RekognitionClient { *

                If you request all facial attributes (by using the detectionAttributes * parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for * example, location of eye and mouth) and other facial attributes. If you provide - * the same image, specify the same collection, and use the same external ID in the + * the same image, specify the same collection, use the same external ID, and use the same model version in the * IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

                * * diff --git a/clients/client-rekognition/src/commands/IndexFacesCommand.ts b/clients/client-rekognition/src/commands/IndexFacesCommand.ts index d1d15ab43d0c..cf3559ce6c46 100644 --- a/clients/client-rekognition/src/commands/IndexFacesCommand.ts +++ b/clients/client-rekognition/src/commands/IndexFacesCommand.ts @@ -110,7 +110,7 @@ export interface IndexFacesCommandOutput extends IndexFacesResponse, __MetadataB *

                If you request all facial attributes (by using the detectionAttributes * parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for * example, location of eye and mouth) and other facial attributes. If you provide - * the same image, specify the same collection, and use the same external ID in the + * the same image, specify the same collection, use the same external ID, and use the same model version in the * IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.
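The revised IndexFaces wording (same image, collection, external ID, and model version) and the new Face.IndexFacesModelVersion field can be exercised as below; collection, bucket, and key names are placeholders.

```ts
import { RekognitionClient, IndexFacesCommand } from "@aws-sdk/client-rekognition";

const client = new RekognitionClient({ region: "us-east-1" });

// Collection, bucket, and object key are placeholders.
const response = await client.send(
  new IndexFacesCommand({
    CollectionId: "my-collection",
    Image: { S3Object: { Bucket: "my-bucket", Name: "photos/person-1.jpg" } },
    ExternalImageId: "person-1",
    DetectionAttributes: ["DEFAULT"],
    MaxFaces: 1,
  })
);

// FaceModelVersion reports the latest face model used with the collection; each
// indexed face also records the model version it was indexed with.
console.log(response.FaceModelVersion);
for (const record of response.FaceRecords ?? []) {
  console.log(record.Face?.FaceId, record.Face?.IndexFacesModelVersion);
}
```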

                * * diff --git a/clients/client-rekognition/src/models/models_0.ts b/clients/client-rekognition/src/models/models_0.ts index ff8154a43b99..3c27e4f60c3a 100644 --- a/clients/client-rekognition/src/models/models_0.ts +++ b/clients/client-rekognition/src/models/models_0.ts @@ -1578,7 +1578,7 @@ export interface CreateCollectionResponse { CollectionArn?: string; /** - *

                Version number of the face detection model associated with the collection you are creating.

                + *

                Latest face model being used with the collection. For more information, see Model versioning.

                */ FaceModelVersion?: string; } @@ -4292,6 +4292,13 @@ export interface Face { * as a tree).

                */ Confidence?: number; + + /** + *

+ * The version of the face detection and storage model that was used when indexing the face vector. + *

                + */ + IndexFacesModelVersion?: string; } export namespace Face { @@ -5643,8 +5650,7 @@ export interface IndexFacesResponse { OrientationCorrection?: OrientationCorrection | string; /** - *

                The version number of the face detection model that's associated with the input - * collection (CollectionId).

                + *

                Latest face model being used with the collection. For more information, see Model versioning.

                */ FaceModelVersion?: string; @@ -5700,7 +5706,7 @@ export interface ListCollectionsResponse { NextToken?: string; /** - *

                Version numbers of the face detection models associated with the collections in the array CollectionIds. + *

                Latest face models being used with the corresponding collections in the array. For more information, see Model versioning. * For example, the value of FaceModelVersions[2] is the version number for the face detection model used * by the collection in CollectionId[2].

                */ @@ -5900,7 +5906,7 @@ export interface ListFacesResponse { NextToken?: string; /** - *

                Version number of the face detection model associated with the input collection (CollectionId).

                + *

                Latest face model being used with the collection. For more information, see Model versioning.

                */ FaceModelVersion?: string; } @@ -6161,7 +6167,7 @@ export interface SearchFacesResponse { FaceMatches?: FaceMatch[]; /** - *

                Version number of the face detection model associated with the input collection (CollectionId).

                + *

                Latest face model being used with the collection. For more information, see Model versioning.

                */ FaceModelVersion?: string; } @@ -6251,7 +6257,7 @@ export interface SearchFacesByImageResponse { FaceMatches?: FaceMatch[]; /** - *

                Version number of the face detection model associated with the input collection (CollectionId).

                + *

                Latest face model being used with the collection. For more information, see Model versioning.
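A minimal IndexFaces sketch that surfaces both the collection-level FaceModelVersion and the new per-face IndexFacesModelVersion described above; the bucket, object key, collection ID, and external ID are placeholders.

```ts
import { RekognitionClient, IndexFacesCommand } from "@aws-sdk/client-rekognition";

const rekognition = new RekognitionClient({});

const output = await rekognition.send(
  new IndexFacesCommand({
    CollectionId: "my-collection", // placeholder
    Image: { S3Object: { Bucket: "my-bucket", Name: "photos/person.jpg" } },
    ExternalImageId: "person-42",
    DetectionAttributes: ["DEFAULT"],
  })
);

console.log("Collection face model:", output.FaceModelVersion);
for (const record of output.FaceRecords ?? []) {
  // IndexFacesModelVersion is the model version recorded per indexed face.
  console.log(record.Face?.FaceId, record.Face?.IndexFacesModelVersion);
}
```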

                */ FaceModelVersion?: string; } diff --git a/clients/client-rekognition/src/protocols/Aws_json1_1.ts b/clients/client-rekognition/src/protocols/Aws_json1_1.ts index 2facca8a9b30..1b921415b399 100644 --- a/clients/client-rekognition/src/protocols/Aws_json1_1.ts +++ b/clients/client-rekognition/src/protocols/Aws_json1_1.ts @@ -9455,6 +9455,7 @@ const deserializeAws_json1_1Face = (output: any, context: __SerdeContext): Face ExternalImageId: __expectString(output.ExternalImageId), FaceId: __expectString(output.FaceId), ImageId: __expectString(output.ImageId), + IndexFacesModelVersion: __expectString(output.IndexFacesModelVersion), } as any; }; diff --git a/clients/client-s3-control/src/models/models_0.ts b/clients/client-s3-control/src/models/models_0.ts index 7e232e485ae1..a9f2a5e73a80 100644 --- a/clients/client-s3-control/src/models/models_0.ts +++ b/clients/client-s3-control/src/models/models_0.ts @@ -1191,16 +1191,16 @@ export enum S3GlacierJobTier { */ export interface S3InitiateRestoreObjectOperation { /** - *

                This argument specifies how long the S3 Glacier or S3 Glacier Deep Archive object remains available in Amazon S3. + *

                This argument specifies how long the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive object remains available in Amazon S3. * S3 Initiate Restore Object jobs that - * target S3 Glacier and S3 Glacier Deep Archive objects require ExpirationInDays set to 1 + * target S3 Glacier Flexible Retrieval and S3 Glacier Deep Archive objects require ExpirationInDays set to 1 * or greater.

                *

                Conversely, do not set ExpirationInDays when * creating S3 Initiate Restore Object jobs that target * S3 Intelligent-Tiering Archive Access and Deep Archive Access tier objects. Objects in * S3 Intelligent-Tiering archive access tiers are not subject to restore expiry, so * specifying ExpirationInDays results in restore request failure.

                - *

                S3 Batch Operations jobs can operate either on S3 Glacier and S3 Glacier Deep Archive storage class + *

                S3 Batch Operations jobs can operate either on S3 Glacier Flexible Retrieval and S3 Glacier Deep Archive storage class * objects or on S3 Intelligent-Tiering Archive Access and Deep Archive Access storage tier * objects, but not both types in the same job. If you need to restore objects of both types * you must create separate Batch Operations jobs.
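A hedged S3 Batch Operations sketch that creates an S3 Initiate Restore Object job for S3 Glacier Flexible Retrieval or Deep Archive objects; the account ID, role ARN, manifest location, ETag, and report bucket are placeholders.

```ts
import { S3ControlClient, CreateJobCommand } from "@aws-sdk/client-s3-control";

const s3Control = new S3ControlClient({});

// Account ID, ARNs, manifest ETag, and report bucket are placeholders.
await s3Control.send(
  new CreateJobCommand({
    AccountId: "111122223333",
    Priority: 10,
    RoleArn: "arn:aws:iam::111122223333:role/batch-ops-restore",
    Operation: {
      S3InitiateRestoreObject: {
        ExpirationInDays: 1, // must be 1 or greater for Glacier Flexible Retrieval / Deep Archive objects
        GlacierJobTier: "BULK",
      },
    },
    Manifest: {
      Spec: { Format: "S3BatchOperations_CSV_20180820", Fields: ["Bucket", "Key"] },
      Location: {
        ObjectArn: "arn:aws:s3:::my-manifest-bucket/restore-manifest.csv",
        ETag: "example-manifest-etag",
      },
    },
    Report: {
      Enabled: true,
      Bucket: "arn:aws:s3:::my-report-bucket",
      Prefix: "restore-job-reports",
      Format: "Report_CSV_20180820",
      ReportScope: "AllTasks",
    },
  })
);
```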

                diff --git a/clients/client-s3/src/S3.ts b/clients/client-s3/src/S3.ts index 4b7f7e6317b9..551de41e3517 100644 --- a/clients/client-s3/src/S3.ts +++ b/clients/client-s3/src/S3.ts @@ -877,11 +877,11 @@ export class S3 extends S3Client { *

                When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or * groups that should be granted specific permissions on the bucket.

                * - *

                If your CreateBucket request includes the BucketOwnerEnforced value for - * the x-amz-object-ownership header, your request can either not specify - * an ACL or specify bucket owner full control ACLs, such as the bucket-owner-full-control - * canned ACL or an equivalent ACL expressed in the XML format. For - * more information, see Controlling object + *

                If your CreateBucket request sets bucket owner enforced for S3 Object Ownership and + * specifies a bucket ACL that provides access to an external Amazon Web Services account, your request + * fails with a 400 error and returns the + * InvalidBucketAclWithObjectOwnership error code. For more information, + * see Controlling object * ownership in the Amazon S3 User Guide.
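A minimal sketch of the enforced-ownership path described above: create the bucket with ObjectOwnership set to BucketOwnerEnforced and omit ACL grants entirely (the bucket name is a placeholder).

```ts
import { S3Client, CreateBucketCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

// With BucketOwnerEnforced, do not send an ACL that grants access to another
// account; such a request fails with InvalidBucketAclWithObjectOwnership.
await s3.send(
  new CreateBucketCommand({
    Bucket: "my-example-bucket", // placeholder
    ObjectOwnership: "BucketOwnerEnforced",
  })
);
```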

                *
                *

                There are two ways to grant the appropriate permissions using the request headers.

                @@ -7721,7 +7721,8 @@ export class S3 extends S3Client { *

                This action is not supported by Amazon S3 on Outposts.

                *

                For more information about Amazon S3 Select, * see Selecting Content from - * Objects in the Amazon S3 User Guide.

                + * Objects and SELECT + * Command in the Amazon S3 User Guide.

                *

                For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select * and S3 Glacier Select in the Amazon S3 User Guide.

                *

                diff --git a/clients/client-s3/src/commands/CreateBucketCommand.ts b/clients/client-s3/src/commands/CreateBucketCommand.ts index bee584ac1992..b25a6a87fec9 100644 --- a/clients/client-s3/src/commands/CreateBucketCommand.ts +++ b/clients/client-s3/src/commands/CreateBucketCommand.ts @@ -49,11 +49,11 @@ export interface CreateBucketCommandOutput extends CreateBucketOutput, __Metadat *

                When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or * groups that should be granted specific permissions on the bucket.

                * - *

                If your CreateBucket request includes the BucketOwnerEnforced value for - * the x-amz-object-ownership header, your request can either not specify - * an ACL or specify bucket owner full control ACLs, such as the bucket-owner-full-control - * canned ACL or an equivalent ACL expressed in the XML format. For - * more information, see Controlling object + *

                If your CreateBucket request sets bucket owner enforced for S3 Object Ownership and + * specifies a bucket ACL that provides access to an external Amazon Web Services account, your request + * fails with a 400 error and returns the + * InvalidBucketAclWithObjectOwnership error code. For more information, + * see Controlling object * ownership in the Amazon S3 User Guide.

                *
                *

                There are two ways to grant the appropriate permissions using the request headers.

                diff --git a/clients/client-s3/src/commands/SelectObjectContentCommand.ts b/clients/client-s3/src/commands/SelectObjectContentCommand.ts index 129d22b17ca0..432dffa8e0d4 100644 --- a/clients/client-s3/src/commands/SelectObjectContentCommand.ts +++ b/clients/client-s3/src/commands/SelectObjectContentCommand.ts @@ -34,7 +34,8 @@ export interface SelectObjectContentCommandOutput extends SelectObjectContentOut *

                This action is not supported by Amazon S3 on Outposts.

                *

                For more information about Amazon S3 Select, * see Selecting Content from - * Objects in the Amazon S3 User Guide.

                + * Objects and SELECT + * Command in the Amazon S3 User Guide.

                *

                For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select * and S3 Glacier Select in the Amazon S3 User Guide.
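A hedged SelectObjectContent sketch over a CSV object with a header row; the bucket, key, and SQL expression are placeholders.

```ts
import { S3Client, SelectObjectContentCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

// Bucket, key, and query are placeholders; assumes a CSV object with a header row.
const { Payload } = await s3.send(
  new SelectObjectContentCommand({
    Bucket: "my-example-bucket",
    Key: "data/orders.csv",
    ExpressionType: "SQL",
    Expression: "SELECT s.id, s.total FROM S3Object s WHERE CAST(s.total AS FLOAT) > 100",
    InputSerialization: { CSV: { FileHeaderInfo: "USE" }, CompressionType: "NONE" },
    OutputSerialization: { CSV: {} },
  })
);

for await (const event of Payload ?? []) {
  if (event.Records?.Payload) {
    // Each Records event carries a chunk of the result bytes.
    process.stdout.write(event.Records.Payload);
  }
}
```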

                *

                diff --git a/clients/client-s3/src/models/models_0.ts b/clients/client-s3/src/models/models_0.ts index 98ccb097f310..46b0459b7d6c 100644 --- a/clients/client-s3/src/models/models_0.ts +++ b/clients/client-s3/src/models/models_0.ts @@ -4404,7 +4404,9 @@ export namespace GetBucketCorsRequest { /** *

                Describes the default server-side encryption to apply to new objects in the bucket. If a * PUT Object request doesn't specify any server-side encryption, this default encryption will - * be applied. For more information, see PUT Bucket encryption in + * be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates + * an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with + * SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption in * the Amazon S3 API Reference.

                */ export interface ServerSideEncryptionByDefault { @@ -4417,9 +4419,9 @@ export interface ServerSideEncryptionByDefault { *

                Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default * encryption. This parameter is allowed if and only if SSEAlgorithm is set to * aws:kms.

                - *

                You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if you - * are using encryption with cross-account operations, you must use a fully qualified KMS key ARN. - * For more information, see Using encryption for cross-account operations.

                + *

                You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if + * you are using encryption with cross-account or Amazon Web Services service operations you must use a fully qualified KMS + * key ARN. For more information, see Using encryption for cross-account operations.
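A minimal default-encryption sketch using SSE-KMS with a fully qualified key ARN, as the note above recommends for cross-account or service access; the bucket name and key ARN are placeholders.

```ts
import { S3Client, PutBucketEncryptionCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

// Bucket name and KMS key ARN are placeholders.
await s3.send(
  new PutBucketEncryptionCommand({
    Bucket: "my-example-bucket",
    ServerSideEncryptionConfiguration: {
      Rules: [
        {
          ApplyServerSideEncryptionByDefault: {
            SSEAlgorithm: "aws:kms",
            KMSMasterKeyID:
              "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
          },
          BucketKeyEnabled: true,
        },
      ],
    },
  })
);
```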

                *

                * For example: *

                @@ -8594,12 +8596,8 @@ export interface HeadObjectRequest { Key: string | undefined; /** - *

                Downloads the specified range bytes of an object. For more information about the HTTP - * Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

                - * - *

                Amazon S3 doesn't support retrieving multiple ranges of data per GET - * request.

                - *
                + *

                Because HeadObject returns only the metadata for an object, this parameter + * has no effect.

                */ Range?: string; diff --git a/clients/client-sagemaker/src/SageMaker.ts b/clients/client-sagemaker/src/SageMaker.ts index f8012e922f83..a15589b28d2b 100644 --- a/clients/client-sagemaker/src/SageMaker.ts +++ b/clients/client-sagemaker/src/SageMaker.ts @@ -8994,14 +8994,14 @@ export class SageMaker extends SageMakerClient { *

                *

                A pipeline execution won't stop while a callback step is running. * When you call StopPipelineExecution - * on a pipeline execution with a running callback step, SageMaker Pipelines sends an + * on a pipeline execution with a running callback step, Amazon SageMaker Pipelines sends an * additional Amazon SQS message to the specified SQS queue. The body of the SQS message * contains a "Status" field which is set to "Stopping".

                *

                You should add logic to your Amazon SQS message consumer to take any needed action (for * example, resource cleanup) upon receipt of the message followed by a call to * SendPipelineExecutionStepSuccess or * SendPipelineExecutionStepFailure.

                - *

                Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.

                + *

                Only when Amazon SageMaker Pipelines receives one of these calls will it stop the pipeline execution.
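A hedged sketch of such an SQS consumer; the queue URL is a placeholder, and the message body fields used here ("Status" and "token") are assumptions about the callback message shape, so confirm them against the messages your callback step actually produces.

```ts
import { SQSClient, ReceiveMessageCommand, DeleteMessageCommand } from "@aws-sdk/client-sqs";
import { SageMakerClient, SendPipelineExecutionStepFailureCommand } from "@aws-sdk/client-sagemaker";

const sqs = new SQSClient({});
const sagemaker = new SageMakerClient({});
const queueUrl = "https://sqs.us-east-1.amazonaws.com/111122223333/callback-queue"; // placeholder

const { Messages } = await sqs.send(
  new ReceiveMessageCommand({ QueueUrl: queueUrl, MaxNumberOfMessages: 10, WaitTimeSeconds: 20 })
);

for (const message of Messages ?? []) {
  // "Status" and "token" are assumed field names; inspect your callback messages to verify.
  const body = JSON.parse(message.Body ?? "{}");
  if (body.Status === "Stopping") {
    // ...clean up any resources the step created, then report back so the execution can stop.
    await sagemaker.send(
      new SendPipelineExecutionStepFailureCommand({
        CallbackToken: body.token,
        FailureReason: "Stopped by StopPipelineExecution",
      })
    );
  }
  await sqs.send(new DeleteMessageCommand({ QueueUrl: queueUrl, ReceiptHandle: message.ReceiptHandle }));
}
```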

                * *

                * Lambda Step @@ -9113,11 +9113,11 @@ export class SageMaker extends SageMakerClient { } /** - *

                Stops a transform job.

                + *

                Stops a batch transform job.

                *

                When Amazon SageMaker receives a StopTransformJob request, the status of the job * changes to Stopping. After Amazon SageMaker * stops - * the job, the status is set to Stopped. When you stop a transform job before + * the job, the status is set to Stopped. When you stop a batch transform job before * it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.
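For completeness, stopping a batch transform job is a single call; the job name below is a placeholder.

```ts
import { SageMakerClient, StopTransformJobCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({});

// The job transitions to Stopping and then Stopped; output from a job stopped
// before completion is not written to Amazon S3.
await sagemaker.send(new StopTransformJobCommand({ TransformJobName: "my-transform-job" }));
```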

                */ public stopTransformJob( diff --git a/clients/client-sagemaker/src/commands/DescribeModelBiasJobDefinitionCommand.ts b/clients/client-sagemaker/src/commands/DescribeModelBiasJobDefinitionCommand.ts index 7fb2ae8bbaef..a5aa2b936fec 100644 --- a/clients/client-sagemaker/src/commands/DescribeModelBiasJobDefinitionCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeModelBiasJobDefinitionCommand.ts @@ -11,8 +11,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { DescribeModelBiasJobDefinitionRequest } from "../models/models_1"; -import { DescribeModelBiasJobDefinitionResponse } from "../models/models_2"; +import { DescribeModelBiasJobDefinitionRequest, DescribeModelBiasJobDefinitionResponse } from "../models/models_2"; import { deserializeAws_json1_1DescribeModelBiasJobDefinitionCommand, serializeAws_json1_1DescribeModelBiasJobDefinitionCommand, diff --git a/clients/client-sagemaker/src/commands/DescribeModelCommand.ts b/clients/client-sagemaker/src/commands/DescribeModelCommand.ts index 036056a24cc5..dee45a9fd0df 100644 --- a/clients/client-sagemaker/src/commands/DescribeModelCommand.ts +++ b/clients/client-sagemaker/src/commands/DescribeModelCommand.ts @@ -11,7 +11,8 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { DescribeModelInput, DescribeModelOutput } from "../models/models_1"; +import { DescribeModelInput } from "../models/models_1"; +import { DescribeModelOutput } from "../models/models_2"; import { deserializeAws_json1_1DescribeModelCommand, serializeAws_json1_1DescribeModelCommand, diff --git a/clients/client-sagemaker/src/commands/ListTrainingJobsForHyperParameterTuningJobCommand.ts b/clients/client-sagemaker/src/commands/ListTrainingJobsForHyperParameterTuningJobCommand.ts index 2550b435f6b7..6b5a82e37554 100644 --- a/clients/client-sagemaker/src/commands/ListTrainingJobsForHyperParameterTuningJobCommand.ts +++ b/clients/client-sagemaker/src/commands/ListTrainingJobsForHyperParameterTuningJobCommand.ts @@ -11,10 +11,8 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { - ListTrainingJobsForHyperParameterTuningJobRequest, - ListTrainingJobsForHyperParameterTuningJobResponse, -} from "../models/models_2"; +import { ListTrainingJobsForHyperParameterTuningJobRequest } from "../models/models_2"; +import { ListTrainingJobsForHyperParameterTuningJobResponse } from "../models/models_3"; import { deserializeAws_json1_1ListTrainingJobsForHyperParameterTuningJobCommand, serializeAws_json1_1ListTrainingJobsForHyperParameterTuningJobCommand, diff --git a/clients/client-sagemaker/src/commands/ListTransformJobsCommand.ts b/clients/client-sagemaker/src/commands/ListTransformJobsCommand.ts index 4dd115b3ee0a..38d93f3b49e8 100644 --- a/clients/client-sagemaker/src/commands/ListTransformJobsCommand.ts +++ b/clients/client-sagemaker/src/commands/ListTransformJobsCommand.ts @@ -11,8 +11,7 @@ import { SerdeContext as __SerdeContext, } from "@aws-sdk/types"; -import { ListTransformJobsRequest } from "../models/models_2"; -import { ListTransformJobsResponse } from "../models/models_3"; +import { ListTransformJobsRequest, ListTransformJobsResponse } from "../models/models_3"; import { deserializeAws_json1_1ListTransformJobsCommand, serializeAws_json1_1ListTransformJobsCommand, diff --git a/clients/client-sagemaker/src/commands/StopPipelineExecutionCommand.ts b/clients/client-sagemaker/src/commands/StopPipelineExecutionCommand.ts index f5e11d1cb7cd..4ac1ed30faa1 100644 --- 
a/clients/client-sagemaker/src/commands/StopPipelineExecutionCommand.ts +++ b/clients/client-sagemaker/src/commands/StopPipelineExecutionCommand.ts @@ -29,14 +29,14 @@ export interface StopPipelineExecutionCommandOutput extends StopPipelineExecutio *

                *

                A pipeline execution won't stop while a callback step is running. * When you call StopPipelineExecution - * on a pipeline execution with a running callback step, SageMaker Pipelines sends an + * on a pipeline execution with a running callback step, Amazon SageMaker Pipelines sends an * additional Amazon SQS message to the specified SQS queue. The body of the SQS message * contains a "Status" field which is set to "Stopping".

                *

                You should add logic to your Amazon SQS message consumer to take any needed action (for * example, resource cleanup) upon receipt of the message followed by a call to * SendPipelineExecutionStepSuccess or * SendPipelineExecutionStepFailure.

                - *

                Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.

                + *

                Only when Amazon SageMaker Pipelines receives one of these calls will it stop the pipeline execution.

                * *

                * Lambda Step diff --git a/clients/client-sagemaker/src/commands/StopTransformJobCommand.ts b/clients/client-sagemaker/src/commands/StopTransformJobCommand.ts index afb9f7c8e81d..116f826e812f 100644 --- a/clients/client-sagemaker/src/commands/StopTransformJobCommand.ts +++ b/clients/client-sagemaker/src/commands/StopTransformJobCommand.ts @@ -22,11 +22,11 @@ export interface StopTransformJobCommandInput extends StopTransformJobRequest {} export interface StopTransformJobCommandOutput extends __MetadataBearer {} /** - *

                Stops a transform job.

                + *

                Stops a batch transform job.

                *

                When Amazon SageMaker receives a StopTransformJob request, the status of the job * changes to Stopping. After Amazon SageMaker * stops - * the job, the status is set to Stopped. When you stop a transform job before + * the job, the status is set to Stopped. When you stop a batch transform job before * it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.

                * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-sagemaker/src/models/models_0.ts b/clients/client-sagemaker/src/models/models_0.ts index d88cce2a6914..bb01054f3367 100644 --- a/clients/client-sagemaker/src/models/models_0.ts +++ b/clients/client-sagemaker/src/models/models_0.ts @@ -1247,6 +1247,14 @@ export enum TrainingInstanceType { ML_G4DN_4XLARGE = "ml.g4dn.4xlarge", ML_G4DN_8XLARGE = "ml.g4dn.8xlarge", ML_G4DN_XLARGE = "ml.g4dn.xlarge", + ML_G5_12XLARGE = "ml.g5.12xlarge", + ML_G5_16XLARGE = "ml.g5.16xlarge", + ML_G5_24XLARGE = "ml.g5.24xlarge", + ML_G5_2XLARGE = "ml.g5.2xlarge", + ML_G5_48XLARGE = "ml.g5.48xlarge", + ML_G5_4XLARGE = "ml.g5.4xlarge", + ML_G5_8XLARGE = "ml.g5.8xlarge", + ML_G5_XLARGE = "ml.g5.xlarge", ML_M4_10XLARGE = "ml.m4.10xlarge", ML_M4_16XLARGE = "ml.m4.16xlarge", ML_M4_2XLARGE = "ml.m4.2xlarge", @@ -4506,7 +4514,7 @@ export interface AutoMLChannel { /** *

                The content type of the data from the input source. You can use - * text/csv;header=present or x-application/vnd.amazon+parquet. + * text/csv;header=present or x-application/vnd.amazon+parquet. * The default value is text/csv;header=present.

                */ ContentType?: string; @@ -7564,11 +7572,10 @@ export interface InputConfig { Framework: Framework | string | undefined; /** - *

                Specifies the framework version to use.

                - *

                This API field is only supported for PyTorch framework versions 1.4, - * 1.5, and 1.6 for - * cloud instance target devices: ml_c4, ml_c5, ml_m4, - * ml_m5, ml_p2, ml_p3, and ml_g4dn.

                + *

                Specifies the framework version to use. This API field is only supported for the PyTorch and TensorFlow frameworks.

                + *

                For information about framework versions supported for cloud targets and edge devices, see + * Cloud Supported Instance Types and Frameworks and + * Edge Supported Frameworks.

                */ FrameworkVersion?: string; } @@ -9188,7 +9195,7 @@ export interface CreateDomainRequest { /** * @deprecated * - *

                This member is deprecated and replaced with KmsKeyId.

                + *

                Use KmsKeyId.

                */ HomeEfsFileSystemKmsKeyId?: string; diff --git a/clients/client-sagemaker/src/models/models_1.ts b/clients/client-sagemaker/src/models/models_1.ts index 16b0f0db976c..073493b6db22 100644 --- a/clients/client-sagemaker/src/models/models_1.ts +++ b/clients/client-sagemaker/src/models/models_1.ts @@ -4524,6 +4524,58 @@ export namespace CreateNotebookInstanceLifecycleConfigOutput { }); } +/** + *

                Configuration that controls the parallelism of the pipeline. + * By default, the parallelism configuration specified applies to all + * executions of the pipeline unless overridden.

                + */ +export interface ParallelismConfiguration { + /** + *

The maximum number of steps that can be executed in parallel.

                + */ + MaxParallelExecutionSteps: number | undefined; +} + +export namespace ParallelismConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ParallelismConfiguration): any => ({ + ...obj, + }); +} + +/** + *

                The location of the pipeline definition stored in Amazon S3.

                + */ +export interface PipelineDefinitionS3Location { + /** + *

                Name of the S3 bucket.

                + */ + Bucket: string | undefined; + + /** + *

                The object key (or key name) uniquely identifies the + * object in an S3 bucket.

                + */ + ObjectKey: string | undefined; + + /** + *

The version ID of the pipeline definition file. If not specified, Amazon SageMaker + * will retrieve the latest version.

                + */ + VersionId?: string; +} + +export namespace PipelineDefinitionS3Location { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PipelineDefinitionS3Location): any => ({ + ...obj, + }); +} + export interface CreatePipelineRequest { /** *

                The name of the pipeline.

                @@ -4538,7 +4590,13 @@ export interface CreatePipelineRequest { /** *

                The JSON pipeline definition of the pipeline.

                */ - PipelineDefinition: string | undefined; + PipelineDefinition?: string; + + /** + *

                The location of the pipeline definition stored in Amazon S3. If specified, + * SageMaker will retrieve the pipeline definition from this location.

                + */ + PipelineDefinitionS3Location?: PipelineDefinitionS3Location; /** *

                A description of the pipeline.

                @@ -4560,6 +4618,12 @@ export interface CreatePipelineRequest { *

                A list of tags to apply to the created pipeline.

                */ Tags?: Tag[]; + + /** + *

                This is the configuration that controls the parallelism of the pipeline. + * If specified, it applies to all runs of this pipeline by default.
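A hedged CreatePipeline sketch combining the two new request members, PipelineDefinitionS3Location and ParallelismConfiguration; the pipeline name, role ARN, bucket, and object key are placeholders.

```ts
import { SageMakerClient, CreatePipelineCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({});

// Pipeline name, role ARN, bucket, and key are placeholders.
await sagemaker.send(
  new CreatePipelineCommand({
    PipelineName: "my-pipeline",
    RoleArn: "arn:aws:iam::111122223333:role/sagemaker-pipeline-role",
    // Instead of an inline PipelineDefinition string, point at a definition stored in S3.
    PipelineDefinitionS3Location: {
      Bucket: "my-pipelines-bucket",
      ObjectKey: "definitions/my-pipeline.json",
      // VersionId is optional; the latest object version is used when omitted.
    },
    // Cap how many steps may run concurrently across executions of this pipeline.
    ParallelismConfiguration: { MaxParallelExecutionSteps: 5 },
  })
);
```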

                + */ + ParallelismConfiguration?: ParallelismConfiguration; } export namespace CreatePipelineRequest { @@ -9126,7 +9190,7 @@ export interface DescribeDomainResponse { /** * @deprecated * - *

                This member is deprecated and replaced with KmsKeyId.

                + *

                Use KmsKeyId.

                */ HomeEfsFileSystemKmsKeyId?: string; @@ -11368,83 +11432,3 @@ export namespace DescribeModelInput { ...obj, }); } - -export interface DescribeModelOutput { - /** - *

                Name of the Amazon SageMaker model.

                - */ - ModelName: string | undefined; - - /** - *

                The location of the primary inference code, associated artifacts, and custom - * environment map that the inference code uses when it is deployed in production. - *

                - */ - PrimaryContainer?: ContainerDefinition; - - /** - *

                The containers in the inference pipeline.

                - */ - Containers?: ContainerDefinition[]; - - /** - *

                Specifies details of how containers in a multi-container endpoint are called.

                - */ - InferenceExecutionConfig?: InferenceExecutionConfig; - - /** - *

                The Amazon Resource Name (ARN) of the IAM role that you specified for the - * model.

                - */ - ExecutionRoleArn: string | undefined; - - /** - *

                A VpcConfig object that specifies the VPC that this model has access - * to. For more information, see Protect Endpoints by Using an Amazon Virtual - * Private Cloud - *

                - */ - VpcConfig?: VpcConfig; - - /** - *

                A timestamp that shows when the model was created.

                - */ - CreationTime: Date | undefined; - - /** - *

                The Amazon Resource Name (ARN) of the model.

                - */ - ModelArn: string | undefined; - - /** - *

                If True, no inbound or outbound network calls can be made to or from the - * model container.

                - */ - EnableNetworkIsolation?: boolean; -} - -export namespace DescribeModelOutput { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeModelOutput): any => ({ - ...obj, - }); -} - -export interface DescribeModelBiasJobDefinitionRequest { - /** - *

                The name of the model bias job definition. The name must be unique within an Amazon Web Services Region - * in the Amazon Web Services account.

                - */ - JobDefinitionName: string | undefined; -} - -export namespace DescribeModelBiasJobDefinitionRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-sagemaker/src/models/models_2.ts b/clients/client-sagemaker/src/models/models_2.ts index 5bdea943b865..e9f5b0b82a99 100644 --- a/clients/client-sagemaker/src/models/models_2.ts +++ b/clients/client-sagemaker/src/models/models_2.ts @@ -33,6 +33,7 @@ import { CompilationJobStatus, CompilationJobSummary, ConditionStepMetadata, + ContainerDefinition, ContextSummary, EdgeOutputConfig, FeatureDefinition, @@ -78,10 +79,10 @@ import { ExperimentSource, FeatureGroupStatus, FlowDefinitionStatus, - HyperParameterTrainingJobSummary, HyperParameterTuningJobStatus, ImageStatus, ImageVersionStatus, + InferenceExecutionConfig, LabelCounters, LabelingJobInputConfig, LabelingJobOutput, @@ -109,6 +110,7 @@ import { ObjectiveStatusCounters, OfflineStoreStatus, OfflineStoreStatusValue, + ParallelismConfiguration, ProcessingInput, ProcessingOutputConfig, ProcessingResources, @@ -132,6 +134,86 @@ import { TrialComponentStatus, } from "./models_1"; +export interface DescribeModelOutput { + /** + *

                Name of the Amazon SageMaker model.

                + */ + ModelName: string | undefined; + + /** + *

                The location of the primary inference code, associated artifacts, and custom + * environment map that the inference code uses when it is deployed in production. + *

                + */ + PrimaryContainer?: ContainerDefinition; + + /** + *

                The containers in the inference pipeline.

                + */ + Containers?: ContainerDefinition[]; + + /** + *

                Specifies details of how containers in a multi-container endpoint are called.

                + */ + InferenceExecutionConfig?: InferenceExecutionConfig; + + /** + *

                The Amazon Resource Name (ARN) of the IAM role that you specified for the + * model.

                + */ + ExecutionRoleArn: string | undefined; + + /** + *

                A VpcConfig object that specifies the VPC that this model has access + * to. For more information, see Protect Endpoints by Using an Amazon Virtual + * Private Cloud + *

                + */ + VpcConfig?: VpcConfig; + + /** + *

                A timestamp that shows when the model was created.

                + */ + CreationTime: Date | undefined; + + /** + *

                The Amazon Resource Name (ARN) of the model.

                + */ + ModelArn: string | undefined; + + /** + *

                If True, no inbound or outbound network calls can be made to or from the + * model container.

                + */ + EnableNetworkIsolation?: boolean; +} + +export namespace DescribeModelOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeModelOutput): any => ({ + ...obj, + }); +} + +export interface DescribeModelBiasJobDefinitionRequest { + /** + *

                The name of the model bias job definition. The name must be unique within an Amazon Web Services Region + * in the Amazon Web Services account.

                + */ + JobDefinitionName: string | undefined; +} + +export namespace DescribeModelBiasJobDefinitionRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeModelBiasJobDefinitionRequest): any => ({ + ...obj, + }); +} + export interface DescribeModelBiasJobDefinitionResponse { /** *

                The Amazon Resource Name (ARN) of the model bias job.

                @@ -1161,6 +1243,11 @@ export interface DescribePipelineResponse { * component, lineage group, or project.

                */ LastModifiedBy?: UserContext; + + /** + *

                Lists the parallelism configuration applied to the pipeline.

                + */ + ParallelismConfiguration?: ParallelismConfiguration; } export namespace DescribePipelineResponse { @@ -1314,6 +1401,11 @@ export interface DescribePipelineExecutionResponse { * component, lineage group, or project.

                */ LastModifiedBy?: UserContext; + + /** + *

                The parallelism configuration applied to the pipeline.

                + */ + ParallelismConfiguration?: ParallelismConfiguration; } export namespace DescribePipelineExecutionResponse { @@ -3822,6 +3914,41 @@ export namespace EdgePackagingJobSummary { }); } +/** + *

                The configurations and outcomes of an Amazon EMR step execution.

                + */ +export interface EMRStepMetadata { + /** + *

                The identifier of the EMR cluster.

                + */ + ClusterId?: string; + + /** + *

                The identifier of the EMR cluster step.

                + */ + StepId?: string; + + /** + *

                The name of the EMR cluster step.

                + */ + StepName?: string; + + /** + *

                The path to the log file where the cluster step's failure root cause + * is recorded.

                + */ + LogFilePath?: string; +} + +export namespace EMRStepMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EMRStepMetadata): any => ({ + ...obj, + }); +} + export interface EnableSagemakerServicecatalogPortfolioInput {} export namespace EnableSagemakerServicecatalogPortfolioInput { @@ -9718,6 +9845,11 @@ export interface PipelineExecutionStepMetadata { *
              */ ClarifyCheck?: ClarifyCheckStepMetadata; + + /** + *

              The configurations and outcomes of an EMR step execution.

              + */ + EMR?: EMRStepMetadata; } export namespace PipelineExecutionStepMetadata { @@ -9747,6 +9879,16 @@ export interface PipelineExecutionStep { */ StepName?: string; + /** + *

              The display name of the step.

              + */ + StepDisplayName?: string; + + /** + *

              The description of the step.

              + */ + StepDescription?: string; + /** *

              The time that the step started executing.

              */ @@ -9767,7 +9909,11 @@ export interface PipelineExecutionStep { */ CacheHitResult?: CacheHitResult; + /** + *

              The current attempt of the execution step. For more information, see Retry Policy for Amazon SageMaker Pipelines steps.
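A minimal sketch that lists pipeline execution steps and reads the new display name, attempt count, and EMR step metadata fields; the execution ARN is a placeholder.

```ts
import { SageMakerClient, ListPipelineExecutionStepsCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({});

// Execution ARN is a placeholder.
const { PipelineExecutionSteps } = await sagemaker.send(
  new ListPipelineExecutionStepsCommand({
    PipelineExecutionArn:
      "arn:aws:sagemaker:us-east-1:111122223333:pipeline/my-pipeline/execution/abc123",
  })
);

for (const step of PipelineExecutionSteps ?? []) {
  console.log(step.StepName, step.StepDisplayName, step.StepStatus, step.AttemptCount);
  if (step.Metadata?.EMR) {
    // New EMR step metadata: cluster ID, step ID, and the failure log location.
    console.log("  EMR:", step.Metadata.EMR.ClusterId, step.Metadata.EMR.StepId, step.Metadata.EMR.LogFilePath);
  }
}
```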

              + */ AttemptCount?: number; + /** *

              The reason why the step failed execution. This is only returned if the step failed its execution.

              */ @@ -9845,7 +9991,9 @@ export namespace ListPipelineParametersForExecutionRequest { */ export interface Parameter { /** - *

              The name of the parameter to assign a value to. This parameter name must match a named parameter in the pipeline definition.

              + *

              The name of the parameter to assign a value to. This + * parameter name must match a named parameter in the + * pipeline definition.

              */ Name: string | undefined; @@ -10712,153 +10860,3 @@ export namespace ListTrainingJobsForHyperParameterTuningJobRequest { ...obj, }); } - -export interface ListTrainingJobsForHyperParameterTuningJobResponse { - /** - *

              A list of TrainingJobSummary objects that - * describe - * the training jobs that the - * ListTrainingJobsForHyperParameterTuningJob request returned.

              - */ - TrainingJobSummaries: HyperParameterTrainingJobSummary[] | undefined; - - /** - *

              If the result of this ListTrainingJobsForHyperParameterTuningJob request - * was truncated, the response includes a NextToken. To retrieve the next set - * of training jobs, use the token in the next request.

              - */ - NextToken?: string; -} - -export namespace ListTrainingJobsForHyperParameterTuningJobResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListTrainingJobsForHyperParameterTuningJobResponse): any => ({ - ...obj, - }); -} - -export interface ListTransformJobsRequest { - /** - *

              A filter that returns only transform jobs created after the specified time.

              - */ - CreationTimeAfter?: Date; - - /** - *

              A filter that returns only transform jobs created before the specified time.

              - */ - CreationTimeBefore?: Date; - - /** - *

              A filter that returns only transform jobs modified after the specified time.

              - */ - LastModifiedTimeAfter?: Date; - - /** - *

              A filter that returns only transform jobs modified before the specified time.

              - */ - LastModifiedTimeBefore?: Date; - - /** - *

              A string in the transform job name. This filter returns only transform jobs whose name - * contains the specified string.

              - */ - NameContains?: string; - - /** - *

              A filter that retrieves only transform jobs with a specific status.

              - */ - StatusEquals?: TransformJobStatus | string; - - /** - *

              The field to sort results by. The default is CreationTime.

              - */ - SortBy?: SortBy | string; - - /** - *

              The sort order for results. The default is Descending.

              - */ - SortOrder?: SortOrder | string; - - /** - *

              If the result of the previous ListTransformJobs request was truncated, - * the response includes a NextToken. To retrieve the next set of transform - * jobs, use the token in the next request.

              - */ - NextToken?: string; - - /** - *

              The maximum number of transform jobs to return in the response. The default value is 10.

              - */ - MaxResults?: number; -} - -export namespace ListTransformJobsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListTransformJobsRequest): any => ({ - ...obj, - }); -} - -/** - *

              Provides a - * summary - * of a transform job. Multiple TransformJobSummary objects are returned as a - * list after in response to a ListTransformJobs call.

              - */ -export interface TransformJobSummary { - /** - *

              The name of the transform job.

              - */ - TransformJobName: string | undefined; - - /** - *

              The Amazon Resource Name (ARN) of the transform job.

              - */ - TransformJobArn: string | undefined; - - /** - *

              A timestamp that shows when the transform Job was created.

              - */ - CreationTime: Date | undefined; - - /** - *

              Indicates when the transform - * job - * ends on compute instances. For successful jobs and stopped jobs, this - * is the exact time - * recorded - * after the results are uploaded. For failed jobs, this is when Amazon SageMaker - * detected that the job failed.

              - */ - TransformEndTime?: Date; - - /** - *

              Indicates when the transform job was last modified.

              - */ - LastModifiedTime?: Date; - - /** - *

              The status of the transform job.

              - */ - TransformJobStatus: TransformJobStatus | string | undefined; - - /** - *

              If the transform job failed, - * the - * reason it failed.

              - */ - FailureReason?: string; -} - -export namespace TransformJobSummary { - /** - * @internal - */ - export const filterSensitiveLog = (obj: TransformJobSummary): any => ({ - ...obj, - }); -} diff --git a/clients/client-sagemaker/src/models/models_3.ts b/clients/client-sagemaker/src/models/models_3.ts index f6d70fa66e59..8f7f4014132f 100644 --- a/clients/client-sagemaker/src/models/models_3.ts +++ b/clients/client-sagemaker/src/models/models_3.ts @@ -36,6 +36,7 @@ import { DebugRuleEvaluationStatus, DriftCheckBaselines, ExperimentConfig, + HyperParameterTrainingJobSummary, MemberDefinition, ModelArtifacts, ModelClientConfig, @@ -47,6 +48,8 @@ import { NotebookInstanceLifecycleHook, NotificationConfiguration, OidcConfig, + ParallelismConfiguration, + PipelineDefinitionS3Location, ProcessingInput, ProcessingOutputConfig, ProcessingResources, @@ -88,9 +91,9 @@ import { SecondaryStatus, SecondaryStatusTransition, ServiceCatalogProvisionedProductDetails, + SortBy, SortOrder, TransformJobStatus, - TransformJobSummary, TrialComponentMetricSummary, TrialComponentSource, TrialSource, @@ -99,6 +102,156 @@ import { Workteam, } from "./models_2"; +export interface ListTrainingJobsForHyperParameterTuningJobResponse { + /** + *

              A list of TrainingJobSummary objects that + * describe + * the training jobs that the + * ListTrainingJobsForHyperParameterTuningJob request returned.

              + */ + TrainingJobSummaries: HyperParameterTrainingJobSummary[] | undefined; + + /** + *

              If the result of this ListTrainingJobsForHyperParameterTuningJob request + * was truncated, the response includes a NextToken. To retrieve the next set + * of training jobs, use the token in the next request.

              + */ + NextToken?: string; +} + +export namespace ListTrainingJobsForHyperParameterTuningJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTrainingJobsForHyperParameterTuningJobResponse): any => ({ + ...obj, + }); +} + +export interface ListTransformJobsRequest { + /** + *

              A filter that returns only transform jobs created after the specified time.

              + */ + CreationTimeAfter?: Date; + + /** + *

              A filter that returns only transform jobs created before the specified time.

              + */ + CreationTimeBefore?: Date; + + /** + *

              A filter that returns only transform jobs modified after the specified time.

              + */ + LastModifiedTimeAfter?: Date; + + /** + *

              A filter that returns only transform jobs modified before the specified time.

              + */ + LastModifiedTimeBefore?: Date; + + /** + *

              A string in the transform job name. This filter returns only transform jobs whose name + * contains the specified string.

              + */ + NameContains?: string; + + /** + *

              A filter that retrieves only transform jobs with a specific status.

              + */ + StatusEquals?: TransformJobStatus | string; + + /** + *

              The field to sort results by. The default is CreationTime.

              + */ + SortBy?: SortBy | string; + + /** + *

              The sort order for results. The default is Descending.

              + */ + SortOrder?: SortOrder | string; + + /** + *

              If the result of the previous ListTransformJobs request was truncated, + * the response includes a NextToken. To retrieve the next set of transform + * jobs, use the token in the next request.

              + */ + NextToken?: string; + + /** + *

              The maximum number of transform jobs to return in the response. The default value is 10.
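A small pagination sketch over ListTransformJobs using NextToken as described above; the status filter is illustrative.

```ts
import { SageMakerClient, ListTransformJobsCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({});

// Page through all completed transform jobs, 10 per page (the default).
let nextToken: string | undefined;
do {
  const page = await sagemaker.send(
    new ListTransformJobsCommand({ StatusEquals: "Completed", NextToken: nextToken })
  );
  for (const job of page.TransformJobSummaries ?? []) {
    console.log(job.TransformJobName, job.TransformJobStatus);
  }
  nextToken = page.NextToken;
} while (nextToken);
```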

              + */ + MaxResults?: number; +} + +export namespace ListTransformJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTransformJobsRequest): any => ({ + ...obj, + }); +} + +/** + *

Provides a + * summary + * of a transform job. Multiple TransformJobSummary objects are returned as a + * list in response to a ListTransformJobs call.

              + */ +export interface TransformJobSummary { + /** + *

              The name of the transform job.

              + */ + TransformJobName: string | undefined; + + /** + *

              The Amazon Resource Name (ARN) of the transform job.

              + */ + TransformJobArn: string | undefined; + + /** + *

A timestamp that shows when the transform job was created.

              + */ + CreationTime: Date | undefined; + + /** + *

              Indicates when the transform + * job + * ends on compute instances. For successful jobs and stopped jobs, this + * is the exact time + * recorded + * after the results are uploaded. For failed jobs, this is when Amazon SageMaker + * detected that the job failed.

              + */ + TransformEndTime?: Date; + + /** + *

              Indicates when the transform job was last modified.

              + */ + LastModifiedTime?: Date; + + /** + *

              The status of the transform job.

              + */ + TransformJobStatus: TransformJobStatus | string | undefined; + + /** + *

              If the transform job failed, + * the + * reason it failed.

              + */ + FailureReason?: string; +} + +export namespace TransformJobSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TransformJobSummary): any => ({ + ...obj, + }); +} + export interface ListTransformJobsResponse { /** *

              An array of @@ -1057,6 +1210,11 @@ export interface Pipeline { */ LastModifiedBy?: UserContext; + /** + *

              The parallelism configuration applied to the pipeline.

              + */ + ParallelismConfiguration?: ParallelismConfiguration; + /** *

              A list of tags that apply to the pipeline.

              */ @@ -1133,6 +1291,11 @@ export interface PipelineExecution { */ LastModifiedBy?: UserContext; + /** + *

              The parallelism configuration applied to the pipeline execution.

              + */ + ParallelismConfiguration?: ParallelismConfiguration; + /** *

              Contains a list of pipeline parameters. This list can be empty.

              */ @@ -1777,6 +1940,12 @@ export interface RetryPipelineExecutionRequest { * operation. An idempotent operation completes no more than once.

              */ ClientRequestToken?: string; + + /** + *

              This configuration, if specified, overrides the parallelism configuration + * of the parent pipeline.

              + */ + ParallelismConfiguration?: ParallelismConfiguration; } export namespace RetryPipelineExecutionRequest { @@ -2896,6 +3065,12 @@ export interface StartPipelineExecutionRequest { * operation. An idempotent operation completes no more than once.

              */ ClientRequestToken?: string; + + /** + *

              This configuration, if specified, overrides the parallelism configuration + * of the parent pipeline for this specific run.
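A minimal sketch of overriding the pipeline-level parallelism for a single run; the pipeline name is a placeholder.

```ts
import { SageMakerClient, StartPipelineExecutionCommand } from "@aws-sdk/client-sagemaker";

const sagemaker = new SageMakerClient({});

// Pipeline name is a placeholder.
await sagemaker.send(
  new StartPipelineExecutionCommand({
    PipelineName: "my-pipeline",
    // Applies only to this run, overriding the pipeline's own ParallelismConfiguration.
    ParallelismConfiguration: { MaxParallelExecutionSteps: 2 },
  })
);
```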

              + */ + ParallelismConfiguration?: ParallelismConfiguration; } export namespace StartPipelineExecutionRequest { @@ -3123,7 +3298,7 @@ export namespace StopTrainingJobRequest { export interface StopTransformJobRequest { /** - *

              The name of the transform job to stop.

              + *

              The name of the batch transform job to stop.

              */ TransformJobName: string | undefined; } @@ -3998,6 +4173,12 @@ export interface UpdatePipelineRequest { */ PipelineDefinition?: string; + /** + *

              The location of the pipeline definition stored in Amazon S3. If specified, + * SageMaker will retrieve the pipeline definition from this location.

              + */ + PipelineDefinitionS3Location?: PipelineDefinitionS3Location; + /** *

              The description of the pipeline.

              */ @@ -4007,6 +4188,11 @@ export interface UpdatePipelineRequest { *

              The Amazon Resource Name (ARN) that the pipeline uses to execute.

              */ RoleArn?: string; + + /** + *

If specified, this parallelism configuration applies to all executions of this pipeline by default.

              + */ + ParallelismConfiguration?: ParallelismConfiguration; } export namespace UpdatePipelineRequest { @@ -4049,6 +4235,12 @@ export interface UpdatePipelineExecutionRequest { *

              The display name of the pipeline execution.

              */ PipelineExecutionDisplayName?: string; + + /** + *

              This configuration, if specified, overrides the parallelism configuration + * of the parent pipeline for this specific run.

              + */ + ParallelismConfiguration?: ParallelismConfiguration; } export namespace UpdatePipelineExecutionRequest { diff --git a/clients/client-sagemaker/src/protocols/Aws_json1_1.ts b/clients/client-sagemaker/src/protocols/Aws_json1_1.ts index 6b0efba62362..da0a92774f46 100644 --- a/clients/client-sagemaker/src/protocols/Aws_json1_1.ts +++ b/clients/client-sagemaker/src/protocols/Aws_json1_1.ts @@ -1040,9 +1040,7 @@ import { DescribeLabelingJobResponse, DescribeLineageGroupRequest, DescribeLineageGroupResponse, - DescribeModelBiasJobDefinitionRequest, DescribeModelInput, - DescribeModelOutput, DriftCheckBaselines, DriftCheckBias, DriftCheckExplainability, @@ -1108,9 +1106,11 @@ import { OfflineStoreStatus, OidcConfig, OidcMemberDefinition, + ParallelismConfiguration, PendingDeploymentSummary, PendingProductionVariantSummary, Phase, + PipelineDefinitionS3Location, ProcessingClusterConfig, ProcessingFeatureStoreOutput, ProcessingInput, @@ -1147,9 +1147,11 @@ import { UiTemplateInfo, } from "../models/models_1"; import { + DescribeModelBiasJobDefinitionRequest, DescribeModelBiasJobDefinitionResponse, DescribeModelExplainabilityJobDefinitionRequest, DescribeModelExplainabilityJobDefinitionResponse, + DescribeModelOutput, DescribeModelPackageGroupInput, DescribeModelPackageGroupOutput, DescribeModelPackageInput, @@ -1205,6 +1207,7 @@ import { EdgeModelStat, EdgeModelSummary, EdgePackagingJobSummary, + EMRStepMetadata, EnableSagemakerServicecatalogPortfolioInput, EnableSagemakerServicecatalogPortfolioOutput, Endpoint, @@ -1337,10 +1340,8 @@ import { ListTagsInput, ListTagsOutput, ListTrainingJobsForHyperParameterTuningJobRequest, - ListTrainingJobsForHyperParameterTuningJobResponse, ListTrainingJobsRequest, ListTrainingJobsResponse, - ListTransformJobsRequest, MetricData, ModelMetadataFilter, ModelMetadataSearchExpression, @@ -1381,7 +1382,6 @@ import { TrainingJobStepMetadata, TrainingJobSummary, TransformJobStepMetadata, - TransformJobSummary, TrialComponentMetricSummary, TrialComponentSource, TrialSource, @@ -1390,6 +1390,8 @@ import { Workteam, } from "../models/models_2"; import { + ListTrainingJobsForHyperParameterTuningJobResponse, + ListTransformJobsRequest, ListTransformJobsResponse, ListTrialComponentsRequest, ListTrialComponentsResponse, @@ -1450,6 +1452,7 @@ import { StopTransformJobRequest, TrainingJob, TransformJob, + TransformJobSummary, Trial, TrialComponent, TrialComponentSimpleSummary, @@ -19667,8 +19670,19 @@ const serializeAws_json1_1CreateNotebookInstanceLifecycleConfigInput = ( const serializeAws_json1_1CreatePipelineRequest = (input: CreatePipelineRequest, context: __SerdeContext): any => { return { ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), + ...(input.ParallelismConfiguration !== undefined && + input.ParallelismConfiguration !== null && { + ParallelismConfiguration: serializeAws_json1_1ParallelismConfiguration(input.ParallelismConfiguration, context), + }), ...(input.PipelineDefinition !== undefined && input.PipelineDefinition !== null && { PipelineDefinition: input.PipelineDefinition }), + ...(input.PipelineDefinitionS3Location !== undefined && + input.PipelineDefinitionS3Location !== null && { + PipelineDefinitionS3Location: serializeAws_json1_1PipelineDefinitionS3Location( + input.PipelineDefinitionS3Location, + context + ), + }), ...(input.PipelineDescription !== undefined && input.PipelineDescription !== null && { PipelineDescription: input.PipelineDescription }), ...(input.PipelineDisplayName !== undefined && @@ -24184,6 +24198,16 @@ const serializeAws_json1_1OutputParameterList = (input: OutputParameter[], conte }); }; +const serializeAws_json1_1ParallelismConfiguration = ( + input: ParallelismConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.MaxParallelExecutionSteps !== undefined && + input.MaxParallelExecutionSteps !== null && { MaxParallelExecutionSteps: input.MaxParallelExecutionSteps }), + }; +}; + const serializeAws_json1_1Parameter = (input: Parameter, context: __SerdeContext): any => { return { ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), @@ -24307,6 +24331,17 @@ const serializeAws_json1_1Phases = (input: Phase[], context: __SerdeContext): an }); }; +const serializeAws_json1_1PipelineDefinitionS3Location = ( + input: PipelineDefinitionS3Location, + context: __SerdeContext +): any => { + return { + ...(input.Bucket !== undefined && input.Bucket !== null && { Bucket: input.Bucket }), + ...(input.ObjectKey !== undefined && input.ObjectKey !== null && { ObjectKey: input.ObjectKey }), + ...(input.VersionId !== undefined && input.VersionId !== null && { VersionId: input.VersionId }), + }; +}; + const serializeAws_json1_1ProcessingClusterConfig = (input: ProcessingClusterConfig, context: __SerdeContext): any => { return { ...(input.InstanceCount !== undefined && input.InstanceCount !== null && { InstanceCount: input.InstanceCount }), @@ -24875,6 +24910,10 @@ const serializeAws_json1_1RetryPipelineExecutionRequest = ( ): any => { return { ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.ParallelismConfiguration !== undefined && + input.ParallelismConfiguration !== null && { + ParallelismConfiguration: serializeAws_json1_1ParallelismConfiguration(input.ParallelismConfiguration, context), + }), ...(input.PipelineExecutionArn !== undefined && input.PipelineExecutionArn !== null && { PipelineExecutionArn: input.PipelineExecutionArn }), }; @@ -25159,6 +25198,10 @@ const serializeAws_json1_1StartPipelineExecutionRequest = ( ): any => { return { ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), + ...(input.ParallelismConfiguration !== undefined && + input.ParallelismConfiguration !== null && { + ParallelismConfiguration: serializeAws_json1_1ParallelismConfiguration(input.ParallelismConfiguration, context), + }), ...(input.PipelineExecutionDescription !== undefined && input.PipelineExecutionDescription !== null && { PipelineExecutionDescription: input.PipelineExecutionDescription, @@ -25957,6 +26000,10 @@ const serializeAws_json1_1UpdatePipelineExecutionRequest = ( context: __SerdeContext ): any => { return { + ...(input.ParallelismConfiguration !== undefined && + input.ParallelismConfiguration !== null && { + ParallelismConfiguration: serializeAws_json1_1ParallelismConfiguration(input.ParallelismConfiguration, context), + }), ...(input.PipelineExecutionArn !== undefined && input.PipelineExecutionArn !== null && { PipelineExecutionArn: input.PipelineExecutionArn }), ...(input.PipelineExecutionDescription !== undefined && @@ -25972,8 +26019,19 @@ const serializeAws_json1_1UpdatePipelineExecutionRequest = ( const serializeAws_json1_1UpdatePipelineRequest = (input: UpdatePipelineRequest, context: __SerdeContext): any => { return { + ...(input.ParallelismConfiguration !== undefined && + input.ParallelismConfiguration !== null && { + ParallelismConfiguration: serializeAws_json1_1ParallelismConfiguration(input.ParallelismConfiguration, context), + }), ...(input.PipelineDefinition !== undefined && input.PipelineDefinition !== null && { PipelineDefinition: input.PipelineDefinition }), + ...(input.PipelineDefinitionS3Location !== undefined && + input.PipelineDefinitionS3Location !== null && { + PipelineDefinitionS3Location: serializeAws_json1_1PipelineDefinitionS3Location( + input.PipelineDefinitionS3Location, + context + ), + }), ...(input.PipelineDescription !== undefined && input.PipelineDescription !== null && { PipelineDescription: input.PipelineDescription }), ...(input.PipelineDisplayName !== undefined && @@ -29647,6 +29705,10 @@ const deserializeAws_json1_1DescribePipelineExecutionResponse = ( output.LastModifiedTime !== undefined && output.LastModifiedTime !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastModifiedTime))) : undefined, + ParallelismConfiguration: + output.ParallelismConfiguration !== undefined && output.ParallelismConfiguration !== null + ? deserializeAws_json1_1ParallelismConfiguration(output.ParallelismConfiguration, context) + : undefined, PipelineArn: __expectString(output.PipelineArn), PipelineExecutionArn: __expectString(output.PipelineExecutionArn), PipelineExecutionDescription: __expectString(output.PipelineExecutionDescription), @@ -29684,6 +29746,10 @@ const deserializeAws_json1_1DescribePipelineResponse = ( output.LastRunTime !== undefined && output.LastRunTime !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastRunTime))) : undefined, + ParallelismConfiguration: + output.ParallelismConfiguration !== undefined && output.ParallelismConfiguration !== null + ? 
deserializeAws_json1_1ParallelismConfiguration(output.ParallelismConfiguration, context) + : undefined, PipelineArn: __expectString(output.PipelineArn), PipelineDefinition: __expectString(output.PipelineDefinition), PipelineDescription: __expectString(output.PipelineDescription), @@ -30521,6 +30587,15 @@ const deserializeAws_json1_1Edges = (output: any, context: __SerdeContext): Edge }); }; +const deserializeAws_json1_1EMRStepMetadata = (output: any, context: __SerdeContext): EMRStepMetadata => { + return { + ClusterId: __expectString(output.ClusterId), + LogFilePath: __expectString(output.LogFilePath), + StepId: __expectString(output.StepId), + StepName: __expectString(output.StepName), + } as any; +}; + const deserializeAws_json1_1EnableSagemakerServicecatalogPortfolioOutput = ( output: any, context: __SerdeContext @@ -34117,6 +34192,15 @@ const deserializeAws_json1_1OutputParameterList = (output: any, context: __Serde }); }; +const deserializeAws_json1_1ParallelismConfiguration = ( + output: any, + context: __SerdeContext +): ParallelismConfiguration => { + return { + MaxParallelExecutionSteps: __expectInt32(output.MaxParallelExecutionSteps), + } as any; +}; + const deserializeAws_json1_1Parameter = (output: any, context: __SerdeContext): Parameter => { return { Name: __expectString(output.Name), @@ -34332,6 +34416,10 @@ const deserializeAws_json1_1Pipeline = (output: any, context: __SerdeContext): P output.LastRunTime !== undefined && output.LastRunTime !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastRunTime))) : undefined, + ParallelismConfiguration: + output.ParallelismConfiguration !== undefined && output.ParallelismConfiguration !== null + ? deserializeAws_json1_1ParallelismConfiguration(output.ParallelismConfiguration, context) + : undefined, PipelineArn: __expectString(output.PipelineArn), PipelineDescription: __expectString(output.PipelineDescription), PipelineDisplayName: __expectString(output.PipelineDisplayName), @@ -34364,6 +34452,10 @@ const deserializeAws_json1_1PipelineExecution = (output: any, context: __SerdeCo output.LastModifiedTime !== undefined && output.LastModifiedTime !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastModifiedTime))) : undefined, + ParallelismConfiguration: + output.ParallelismConfiguration !== undefined && output.ParallelismConfiguration !== null + ? deserializeAws_json1_1ParallelismConfiguration(output.ParallelismConfiguration, context) + : undefined, PipelineArn: __expectString(output.PipelineArn), PipelineExecutionArn: __expectString(output.PipelineExecutionArn), PipelineExecutionDescription: __expectString(output.PipelineExecutionDescription), @@ -34400,6 +34492,8 @@ const deserializeAws_json1_1PipelineExecutionStep = (output: any, context: __Ser output.StartTime !== undefined && output.StartTime !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.StartTime))) : undefined, + StepDescription: __expectString(output.StepDescription), + StepDisplayName: __expectString(output.StepDisplayName), StepName: __expectString(output.StepName), StepStatus: __expectString(output.StepStatus), } as any; @@ -34436,6 +34530,10 @@ const deserializeAws_json1_1PipelineExecutionStepMetadata = ( output.Condition !== undefined && output.Condition !== null ? deserializeAws_json1_1ConditionStepMetadata(output.Condition, context) : undefined, + EMR: + output.EMR !== undefined && output.EMR !== null + ? 
deserializeAws_json1_1EMRStepMetadata(output.EMR, context) + : undefined, Lambda: output.Lambda !== undefined && output.Lambda !== null ? deserializeAws_json1_1LambdaStepMetadata(output.Lambda, context) diff --git a/clients/client-snowball/src/models/models_0.ts b/clients/client-snowball/src/models/models_0.ts index 100660a89c0e..99ef61c6871f 100644 --- a/clients/client-snowball/src/models/models_0.ts +++ b/clients/client-snowball/src/models/models_0.ts @@ -1226,7 +1226,11 @@ export namespace CreateLongTermPricingResult { export interface ConflictException extends __SmithyException, $MetadataBearer { name: "ConflictException"; $fault: "client"; + /** + *

              You get this exception when you call CreateReturnShippingLabel more than once while other requests are not completed.
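A minimal handling sketch (the job ID is a placeholder) of how a caller might treat this exception when requesting a return shipping label with the v3 Snowball client:

import { SnowballClient, CreateReturnShippingLabelCommand } from "@aws-sdk/client-snowball";

const snowball = new SnowballClient({});
try {
  await snowball.send(
    new CreateReturnShippingLabelCommand({ JobId: "JID123e4567-e89b-12d3-a456-426655440000" })
  );
} catch (err: any) {
  // The modeled ConflictException carries the optional ConflictResource and Message fields added here.
  if (err?.name === "ConflictException") {
    console.warn("A label request is already in progress:", err.ConflictResource, err.Message);
  } else {
    throw err;
  }
}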

              + */ ConflictResource?: string; + Message?: string; } diff --git a/codegen/sdk-codegen/aws-models/appstream.json b/codegen/sdk-codegen/aws-models/appstream.json index c218402ef13d..a014166afc16 100644 --- a/codegen/sdk-codegen/aws-models/appstream.json +++ b/codegen/sdk-codegen/aws-models/appstream.json @@ -186,6 +186,21 @@ "target": "com.amazonaws.appstream#AppBlock" } }, + "com.amazonaws.appstream#AppVisibility": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "ASSOCIATED", + "name": "ASSOCIATED" + } + ] + } + }, "com.amazonaws.appstream#Application": { "type": "structure", "members": { @@ -478,6 +493,62 @@ } } }, + "com.amazonaws.appstream#AssociateApplicationToEntitlement": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#AssociateApplicationToEntitlementRequest" + }, + "output": { + "target": "com.amazonaws.appstream#AssociateApplicationToEntitlementResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#EntitlementNotFoundException" + }, + { + "target": "com.amazonaws.appstream#LimitExceededException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

              Associates the specified application with the specified entitlement.

              " + } + }, + "com.amazonaws.appstream#AssociateApplicationToEntitlementRequest": { + "type": "structure", + "members": { + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the stack.

              ", + "smithy.api#required": {} + } + }, + "EntitlementName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the entitlement.

              ", + "smithy.api#required": {} + } + }, + "ApplicationIdentifier": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              The identifier of the application.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.appstream#AssociateApplicationToEntitlementResult": { + "type": "structure", + "members": {} + }, "com.amazonaws.appstream#AssociateFleet": { "type": "operation", "input": { @@ -1064,6 +1135,82 @@ } } }, + "com.amazonaws.appstream#CreateEntitlement": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#CreateEntitlementRequest" + }, + "output": { + "target": "com.amazonaws.appstream#CreateEntitlementResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#EntitlementAlreadyExistsException" + }, + { + "target": "com.amazonaws.appstream#LimitExceededException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

              Creates a new entitlement. Entitlements control access to specific applications within\n a stack, based on user attributes. Entitlements apply to SAML 2.0 federated user\n identities. Amazon AppStream 2.0 user pool and streaming URL users are entitled to all\n applications in a stack. Entitlements don't apply to the desktop stream view\n application, or to applications managed by a dynamic app provider using the Dynamic\n Application Framework.
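As a rough usage sketch (stack name, entitlement name, and attribute values are placeholders), an entitlement scoped to a SAML department attribute could be created with the generated client as follows:

import { AppStreamClient, CreateEntitlementCommand } from "@aws-sdk/client-appstream";

const appstream = new AppStreamClient({});
const { Entitlement } = await appstream.send(
  new CreateEntitlementCommand({
    Name: "FinanceApps",             // placeholder entitlement name
    StackName: "ExampleStack",       // placeholder stack name
    AppVisibility: "ASSOCIATED",     // ALL or ASSOCIATED, per the AppVisibility enum above
    Attributes: [{ Name: "department", Value: "finance" }],
  })
);
console.log(Entitlement?.Name, Entitlement?.CreatedTime);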

              " + } + }, + "com.amazonaws.appstream#CreateEntitlementRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the entitlement.

              ", + "smithy.api#required": {} + } + }, + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the stack with which the entitlement is associated.

              ", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.appstream#Description", + "traits": { + "smithy.api#documentation": "

              The description of the entitlement.

              " + } + }, + "AppVisibility": { + "target": "com.amazonaws.appstream#AppVisibility", + "traits": { + "smithy.api#documentation": "

              Specifies whether all or selected apps are entitled.

              ", + "smithy.api#required": {} + } + }, + "Attributes": { + "target": "com.amazonaws.appstream#EntitlementAttributeList", + "traits": { + "smithy.api#documentation": "

              The attributes of the entitlement.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.appstream#CreateEntitlementResult": { + "type": "structure", + "members": { + "Entitlement": { + "target": "com.amazonaws.appstream#Entitlement", + "traits": { + "smithy.api#documentation": "

              The entitlement.

              " + } + } + } + }, "com.amazonaws.appstream#CreateFleet": { "type": "operation", "input": { @@ -1975,6 +2122,55 @@ "type": "structure", "members": {} }, + "com.amazonaws.appstream#DeleteEntitlement": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#DeleteEntitlementRequest" + }, + "output": { + "target": "com.amazonaws.appstream#DeleteEntitlementResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.appstream#EntitlementNotFoundException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

              Deletes the specified entitlement.

              " + } + }, + "com.amazonaws.appstream#DeleteEntitlementRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the entitlement.

              ", + "smithy.api#required": {} + } + }, + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the stack with which the entitlement is associated.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.appstream#DeleteEntitlementResult": { + "type": "structure", + "members": {} + }, "com.amazonaws.appstream#DeleteFleet": { "type": "operation", "input": { @@ -2164,6 +2360,9 @@ { "target": "com.amazonaws.appstream#ConcurrentModificationException" }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, { "target": "com.amazonaws.appstream#ResourceInUseException" }, @@ -2502,6 +2701,76 @@ } } }, + "com.amazonaws.appstream#DescribeEntitlements": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#DescribeEntitlementsRequest" + }, + "output": { + "target": "com.amazonaws.appstream#DescribeEntitlementsResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#EntitlementNotFoundException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

              Retrieves a list that describes one or more entitlements.

              " + } + }, + "com.amazonaws.appstream#DescribeEntitlementsRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the entitlement.

              " + } + }, + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the stack with which the entitlement is associated.

              ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              The pagination token used to retrieve the next page of results for this operation.

              " + } + }, + "MaxResults": { + "target": "com.amazonaws.appstream#Integer", + "traits": { + "smithy.api#documentation": "

              The maximum size of each page of results.
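A short sketch (the stack name is a placeholder) of paging through every entitlement for a stack with NextToken:

import { AppStreamClient, DescribeEntitlementsCommand } from "@aws-sdk/client-appstream";

const appstream = new AppStreamClient({});
let nextToken: string | undefined;
do {
  const page = await appstream.send(
    new DescribeEntitlementsCommand({ StackName: "ExampleStack", MaxResults: 25, NextToken: nextToken })
  );
  for (const entitlement of page.Entitlements ?? []) {
    console.log(entitlement.Name, entitlement.AppVisibility);
  }
  nextToken = page.NextToken;
} while (nextToken);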

              " + } + } + } + }, + "com.amazonaws.appstream#DescribeEntitlementsResult": { + "type": "structure", + "members": { + "Entitlements": { + "target": "com.amazonaws.appstream#EntitlementList", + "traits": { + "smithy.api#documentation": "

              The entitlements.

              " + } + }, + "NextToken": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              The pagination token used to retrieve the next page of results for this\n operation.

              " + } + } + } + }, "com.amazonaws.appstream#DescribeFleets": { "type": "operation", "input": { @@ -3305,6 +3574,59 @@ "type": "structure", "members": {} }, + "com.amazonaws.appstream#DisassociateApplicationFromEntitlement": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#DisassociateApplicationFromEntitlementRequest" + }, + "output": { + "target": "com.amazonaws.appstream#DisassociateApplicationFromEntitlementResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#EntitlementNotFoundException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

              Removes the specified application from the specified entitlement.

              " + } + }, + "com.amazonaws.appstream#DisassociateApplicationFromEntitlementRequest": { + "type": "structure", + "members": { + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the stack with which the entitlement is associated.

              ", + "smithy.api#required": {} + } + }, + "EntitlementName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the entitlement.

              ", + "smithy.api#required": {} + } + }, + "ApplicationIdentifier": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              The identifier of the application to remove from the entitlement.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.appstream#DisassociateApplicationFromEntitlementResult": { + "type": "structure", + "members": {} + }, "com.amazonaws.appstream#DisassociateFleet": { "type": "operation", "input": { @@ -3471,6 +3793,146 @@ "type": "structure", "members": {} }, + "com.amazonaws.appstream#EntitledApplication": { + "type": "structure", + "members": { + "ApplicationIdentifier": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              The identifier of the application.

              ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

              The application associated with an entitlement. Access is controlled based on user attributes.

              " + } + }, + "com.amazonaws.appstream#EntitledApplicationList": { + "type": "list", + "member": { + "target": "com.amazonaws.appstream#EntitledApplication" + } + }, + "com.amazonaws.appstream#Entitlement": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the entitlement.

              ", + "smithy.api#required": {} + } + }, + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the stack with which the entitlement is associated.

              ", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.appstream#Description", + "traits": { + "smithy.api#documentation": "

              The description of the entitlement.

              " + } + }, + "AppVisibility": { + "target": "com.amazonaws.appstream#AppVisibility", + "traits": { + "smithy.api#documentation": "

              Specifies whether all or selected apps are entitled.

              ", + "smithy.api#required": {} + } + }, + "Attributes": { + "target": "com.amazonaws.appstream#EntitlementAttributeList", + "traits": { + "smithy.api#documentation": "

              The attributes of the entitlement.

              ", + "smithy.api#required": {} + } + }, + "CreatedTime": { + "target": "com.amazonaws.appstream#Timestamp", + "traits": { + "smithy.api#documentation": "

              The time when the entitlement was created.

              " + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.appstream#Timestamp", + "traits": { + "smithy.api#documentation": "

              The time when the entitlement was last modified.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              Specifies an entitlement. Entitlements control access to specific applications within\n a stack, based on user attributes. Entitlements apply to SAML 2.0 federated user\n identities. Amazon AppStream 2.0 user pool and streaming URL users are entitled to all\n applications in a stack. Entitlements don't apply to the desktop stream view\n application, or to applications managed by a dynamic app provider using the Dynamic\n Application Framework.

              " + } + }, + "com.amazonaws.appstream#EntitlementAlreadyExistsException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.appstream#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

              The entitlement already exists.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.appstream#EntitlementAttribute": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              A supported AWS IAM SAML PrincipalTag attribute that is matched to the\n associated value when a user identity federates into an Amazon AppStream 2.0 SAML\n application.

              The following are valid values:

              • roles
              • department
              • organization
              • groups
              • title
              • costCenter
              • userType

              ", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              A value that is matched to a supported SAML attribute name when a user identity\n federates into an Amazon AppStream 2.0 SAML application.

              ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

              An attribute associated with an entitlement. Application entitlements work by matching\n a supported SAML 2.0 attribute name to a value when a user identity federates to an\n Amazon AppStream 2.0 SAML application.

              " + } + }, + "com.amazonaws.appstream#EntitlementAttributeList": { + "type": "list", + "member": { + "target": "com.amazonaws.appstream#EntitlementAttribute" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.appstream#EntitlementList": { + "type": "list", + "member": { + "target": "com.amazonaws.appstream#Entitlement" + } + }, + "com.amazonaws.appstream#EntitlementNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.appstream#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

              The entitlement can't be found.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.appstream#ErrorMessage": { "type": "string", "traits": { @@ -4514,6 +4976,77 @@ } } }, + "com.amazonaws.appstream#ListEntitledApplications": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#ListEntitledApplicationsRequest" + }, + "output": { + "target": "com.amazonaws.appstream#ListEntitledApplicationsResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#EntitlementNotFoundException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

              Retrieves a list of entitled applications.

              " + } + }, + "com.amazonaws.appstream#ListEntitledApplicationsRequest": { + "type": "structure", + "members": { + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the stack with which the entitlement is associated.

              ", + "smithy.api#required": {} + } + }, + "EntitlementName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the entitlement.

              ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              The pagination token used to retrieve the next page of results for this operation.

              " + } + }, + "MaxResults": { + "target": "com.amazonaws.appstream#Integer", + "traits": { + "smithy.api#documentation": "

              The maximum size of each page of results.

              " + } + } + } + }, + "com.amazonaws.appstream#ListEntitledApplicationsResult": { + "type": "structure", + "members": { + "EntitledApplications": { + "target": "com.amazonaws.appstream#EntitledApplicationList", + "traits": { + "smithy.api#documentation": "

              The entitled applications.

              " + } + }, + "NextToken": { + "target": "com.amazonaws.appstream#String", + "traits": { + "smithy.api#documentation": "

              The pagination token used to retrieve the next page of results for this operation.

              " + } + } + } + }, "com.amazonaws.appstream#ListTagsForResource": { "type": "operation", "input": { @@ -4685,6 +5218,9 @@ { "target": "com.amazonaws.appstream#AssociateApplicationFleet" }, + { + "target": "com.amazonaws.appstream#AssociateApplicationToEntitlement" + }, { "target": "com.amazonaws.appstream#AssociateFleet" }, @@ -4706,6 +5242,9 @@ { "target": "com.amazonaws.appstream#CreateDirectoryConfig" }, + { + "target": "com.amazonaws.appstream#CreateEntitlement" + }, { "target": "com.amazonaws.appstream#CreateFleet" }, @@ -4739,6 +5278,9 @@ { "target": "com.amazonaws.appstream#DeleteDirectoryConfig" }, + { + "target": "com.amazonaws.appstream#DeleteEntitlement" + }, { "target": "com.amazonaws.appstream#DeleteFleet" }, @@ -4772,6 +5314,9 @@ { "target": "com.amazonaws.appstream#DescribeDirectoryConfigs" }, + { + "target": "com.amazonaws.appstream#DescribeEntitlements" + }, { "target": "com.amazonaws.appstream#DescribeFleets" }, @@ -4805,6 +5350,9 @@ { "target": "com.amazonaws.appstream#DisassociateApplicationFleet" }, + { + "target": "com.amazonaws.appstream#DisassociateApplicationFromEntitlement" + }, { "target": "com.amazonaws.appstream#DisassociateFleet" }, @@ -4820,6 +5368,9 @@ { "target": "com.amazonaws.appstream#ListAssociatedStacks" }, + { + "target": "com.amazonaws.appstream#ListEntitledApplications" + }, { "target": "com.amazonaws.appstream#ListTagsForResource" }, @@ -4847,6 +5398,9 @@ { "target": "com.amazonaws.appstream#UpdateDirectoryConfig" }, + { + "target": "com.amazonaws.appstream#UpdateEntitlement" + }, { "target": "com.amazonaws.appstream#UpdateFleet" }, @@ -6069,6 +6623,80 @@ } } }, + "com.amazonaws.appstream#UpdateEntitlement": { + "type": "operation", + "input": { + "target": "com.amazonaws.appstream#UpdateEntitlementRequest" + }, + "output": { + "target": "com.amazonaws.appstream#UpdateEntitlementResult" + }, + "errors": [ + { + "target": "com.amazonaws.appstream#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.appstream#EntitlementNotFoundException" + }, + { + "target": "com.amazonaws.appstream#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.appstream#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

              Updates the specified entitlement.

              " + } + }, + "com.amazonaws.appstream#UpdateEntitlementRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the entitlement.

              ", + "smithy.api#required": {} + } + }, + "StackName": { + "target": "com.amazonaws.appstream#Name", + "traits": { + "smithy.api#documentation": "

              The name of the stack with which the entitlement is associated.

              ", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.appstream#Description", + "traits": { + "smithy.api#documentation": "

              The description of the entitlement.

              " + } + }, + "AppVisibility": { + "target": "com.amazonaws.appstream#AppVisibility", + "traits": { + "smithy.api#documentation": "

              Specifies whether all or only selected apps are entitled.

              " + } + }, + "Attributes": { + "target": "com.amazonaws.appstream#EntitlementAttributeList", + "traits": { + "smithy.api#documentation": "

              The attributes of the entitlement.

              " + } + } + } + }, + "com.amazonaws.appstream#UpdateEntitlementResult": { + "type": "structure", + "members": { + "Entitlement": { + "target": "com.amazonaws.appstream#Entitlement", + "traits": { + "smithy.api#documentation": "

              The entitlement.

              " + } + } + } + }, "com.amazonaws.appstream#UpdateFleet": { "type": "operation", "input": { diff --git a/codegen/sdk-codegen/aws-models/appsync.json b/codegen/sdk-codegen/aws-models/appsync.json index eb80e4124982..6844426b2bdc 100644 --- a/codegen/sdk-codegen/aws-models/appsync.json +++ b/codegen/sdk-codegen/aws-models/appsync.json @@ -1253,6 +1253,12 @@ }, "syncConfig": { "target": "com.amazonaws.appsync#SyncConfig" + }, + "maxBatchSize": { + "target": "com.amazonaws.appsync#MaxBatchSize", + "traits": { + "smithy.api#documentation": "

              The maximum batching size for a resolver.
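As an illustration (API ID, type, field, and data source names are placeholders), the new field can be set when creating a resolver to enable batched invocations of a Lambda data source:

import { AppSyncClient, CreateResolverCommand } from "@aws-sdk/client-appsync";

const appsync = new AppSyncClient({});
await appsync.send(
  new CreateResolverCommand({
    apiId: "example-api-id",
    typeName: "Query",
    fieldName: "listPosts",
    dataSourceName: "PostsLambdaDataSource",
    maxBatchSize: 10, // MaxBatchSize is constrained to 0-2000 in this model
  })
);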

              " + } } } }, @@ -1474,6 +1480,12 @@ "traits": { "smithy.api#documentation": "

              The caching configuration for the resolver.

              " } + }, + "maxBatchSize": { + "target": "com.amazonaws.appsync#MaxBatchSize", + "traits": { + "smithy.api#documentation": "

              The maximum batching size for a resolver.

              " + } } } }, @@ -2481,6 +2493,12 @@ }, "syncConfig": { "target": "com.amazonaws.appsync#SyncConfig" + }, + "maxBatchSize": { + "target": "com.amazonaws.appsync#MaxBatchSize", + "traits": { + "smithy.api#documentation": "

              The maximum batching size for a resolver.

              " + } } }, "traits": { @@ -4050,6 +4068,16 @@ "smithy.api#length": { "min": 1, "max": 65536 + }, + "smithy.api#pattern": "^.*$" + } + }, + "com.amazonaws.appsync#MaxBatchSize": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 2000 } } }, @@ -4300,6 +4328,12 @@ "traits": { "smithy.api#documentation": "

              The caching configuration for the resolver.

              " } + }, + "maxBatchSize": { + "target": "com.amazonaws.appsync#MaxBatchSize", + "traits": { + "smithy.api#documentation": "

              The maximum batching size for a resolver.

              " + } } }, "traits": { @@ -4588,7 +4622,8 @@ "smithy.api#length": { "min": 0, "max": 256 - } + }, + "smithy.api#pattern": "^[\\s\\w+-=\\.:/@]*$" } }, "com.amazonaws.appsync#Type": { @@ -5167,6 +5202,12 @@ }, "syncConfig": { "target": "com.amazonaws.appsync#SyncConfig" + }, + "maxBatchSize": { + "target": "com.amazonaws.appsync#MaxBatchSize", + "traits": { + "smithy.api#documentation": "

              The maximum batching size for a resolver.

              " + } } } }, @@ -5390,6 +5431,12 @@ "traits": { "smithy.api#documentation": "

              The caching configuration for the resolver.

              " } + }, + "maxBatchSize": { + "target": "com.amazonaws.appsync#MaxBatchSize", + "traits": { + "smithy.api#documentation": "

              The maximum batching size for a resolver.

              " + } } } }, diff --git a/codegen/sdk-codegen/aws-models/cloudtrail.json b/codegen/sdk-codegen/aws-models/cloudtrail.json index db55092f89cb..bcb6cafcf263 100644 --- a/codegen/sdk-codegen/aws-models/cloudtrail.json +++ b/codegen/sdk-codegen/aws-models/cloudtrail.json @@ -41,6 +41,15 @@ { "target": "com.amazonaws.cloudtrail#CloudTrailARNInvalidException" }, + { + "target": "com.amazonaws.cloudtrail#ConflictException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, { "target": "com.amazonaws.cloudtrail#InvalidTagParameterException" }, @@ -84,7 +93,8 @@ "TagsList": { "target": "com.amazonaws.cloudtrail#TagsList", "traits": { - "smithy.api#documentation": "

              Contains a list of tags, up to a limit of 50

              " + "smithy.api#documentation": "

              Contains a list of tags, up to a limit of 50

              ", + "smithy.api#required": {} } } }, @@ -197,6 +207,86 @@ "com.amazonaws.cloudtrail#ByteBuffer": { "type": "blob" }, + "com.amazonaws.cloudtrail#CancelQuery": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#CancelQueryRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#CancelQueryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#ConflictException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveQueryException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#QueryIdNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Cancels a query if the query is not in a terminated state, such as CANCELLED, FAILED or FINISHED. You must specify an ARN value for EventDataStore. \n The ID of the query that you want to cancel is also required. When you run CancelQuery, the query status might \n show as CANCELLED even if the operation is not yet finished.
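A minimal sketch (the event data store ARN and query ID are placeholders) of cancelling a running query:

import { CloudTrailClient, CancelQueryCommand } from "@aws-sdk/client-cloudtrail";

const cloudtrail = new CloudTrailClient({});
const { QueryId, QueryStatus } = await cloudtrail.send(
  new CancelQueryCommand({
    EventDataStore: "arn:aws:cloudtrail:us-east-1:111122223333:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE",
    QueryId: "EXAMPLEd-17a7-47c3-a9a1-eccf7EXAMPLE",
  })
);
// Cancellation is asynchronous: QueryStatus may show CANCELLED before the query has fully stopped.
console.log(QueryId, QueryStatus);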

              ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cloudtrail#CancelQueryRequest": { + "type": "structure", + "members": { + "EventDataStore": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN (or the ID suffix of the ARN) of an event data store on which the specified query is running.

              ", + "smithy.api#required": {} + } + }, + "QueryId": { + "target": "com.amazonaws.cloudtrail#UUID", + "traits": { + "smithy.api#documentation": "

              The ID of the query that you want to cancel. The QueryId comes from the response of a StartQuery \n operation.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.cloudtrail#CancelQueryResponse": { + "type": "structure", + "members": { + "QueryId": { + "target": "com.amazonaws.cloudtrail#UUID", + "traits": { + "smithy.api#documentation": "

              The ID of the canceled query.

              ", + "smithy.api#required": {} + } + }, + "QueryStatus": { + "target": "com.amazonaws.cloudtrail#QueryStatus", + "traits": { + "smithy.api#documentation": "

              Shows the status of a query after a CancelQuery request. Typically, the values shown are either \n RUNNING or CANCELLED.

              ", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.cloudtrail#CloudTrailARNInvalidException": { "type": "structure", "members": { @@ -282,30 +372,54 @@ { "target": "com.amazonaws.cloudtrail#AddTags" }, + { + "target": "com.amazonaws.cloudtrail#CancelQuery" + }, + { + "target": "com.amazonaws.cloudtrail#CreateEventDataStore" + }, { "target": "com.amazonaws.cloudtrail#CreateTrail" }, + { + "target": "com.amazonaws.cloudtrail#DeleteEventDataStore" + }, { "target": "com.amazonaws.cloudtrail#DeleteTrail" }, + { + "target": "com.amazonaws.cloudtrail#DescribeQuery" + }, { "target": "com.amazonaws.cloudtrail#DescribeTrails" }, + { + "target": "com.amazonaws.cloudtrail#GetEventDataStore" + }, { "target": "com.amazonaws.cloudtrail#GetEventSelectors" }, { "target": "com.amazonaws.cloudtrail#GetInsightSelectors" }, + { + "target": "com.amazonaws.cloudtrail#GetQueryResults" + }, { "target": "com.amazonaws.cloudtrail#GetTrail" }, { "target": "com.amazonaws.cloudtrail#GetTrailStatus" }, + { + "target": "com.amazonaws.cloudtrail#ListEventDataStores" + }, { "target": "com.amazonaws.cloudtrail#ListPublicKeys" }, + { + "target": "com.amazonaws.cloudtrail#ListQueries" + }, { "target": "com.amazonaws.cloudtrail#ListTags" }, @@ -324,12 +438,21 @@ { "target": "com.amazonaws.cloudtrail#RemoveTags" }, + { + "target": "com.amazonaws.cloudtrail#RestoreEventDataStore" + }, { "target": "com.amazonaws.cloudtrail#StartLogging" }, + { + "target": "com.amazonaws.cloudtrail#StartQuery" + }, { "target": "com.amazonaws.cloudtrail#StopLogging" }, + { + "target": "com.amazonaws.cloudtrail#UpdateEventDataStore" + }, { "target": "com.amazonaws.cloudtrail#UpdateTrail" } @@ -375,6 +498,169 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.cloudtrail#CreateEventDataStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#CreateEventDataStoreRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#CreateEventDataStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#CloudTrailAccessNotEnabledException" + }, + { + "target": "com.amazonaws.cloudtrail#ConflictException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreAlreadyExistsException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreMaxLimitExceededException" + }, + { + "target": "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidTagParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#OrganizationNotInAllFeaturesModeException" + }, + { + "target": "com.amazonaws.cloudtrail#OrganizationsNotInUseException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Creates a new event data store.
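A brief sketch (the store name is a placeholder) of creating a multi-Region event data store with a one-year retention period:

import { CloudTrailClient, CreateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

const cloudtrail = new CloudTrailClient({});
const { EventDataStoreArn, Status } = await cloudtrail.send(
  new CreateEventDataStoreCommand({
    Name: "management-events",     // placeholder name
    MultiRegionEnabled: true,
    RetentionPeriod: 365,          // days; the model allows up to 2555
    TerminationProtectionEnabled: true,
  })
);
console.log(EventDataStoreArn, Status);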

              " + } + }, + "com.amazonaws.cloudtrail#CreateEventDataStoreRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.cloudtrail#EventDataStoreName", + "traits": { + "smithy.api#documentation": "

              The name of the event data store.

              ", + "smithy.api#required": {} + } + }, + "AdvancedEventSelectors": { + "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", + "traits": { + "smithy.api#documentation": "

              The advanced event selectors to use to select the events for the data store. For more information about how to use advanced event \n selectors, see Log events by using advanced event selectors in the CloudTrail \n User Guide.

              " + } + }, + "MultiRegionEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Specifies whether the event data store includes events from all regions, or only from the region in which the event data store \n is created.

              " + } + }, + "OrganizationEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Specifies whether an event data store collects events logged for an organization in Organizations.

              " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.cloudtrail#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

              The retention period of the event data store, in days. You can set a retention period of up to 2555 days, \n the equivalent of seven years.

              " + } + }, + "TerminationProtectionEnabled": { + "target": "com.amazonaws.cloudtrail#TerminationProtectionEnabled", + "traits": { + "smithy.api#documentation": "

              Specifies whether termination protection is enabled for the event data store. If termination protection is enabled, you \n cannot delete the event data store until termination protection is disabled.

              " + } + }, + "TagsList": { + "target": "com.amazonaws.cloudtrail#TagsList" + } + } + }, + "com.amazonaws.cloudtrail#CreateEventDataStoreResponse": { + "type": "structure", + "members": { + "EventDataStoreArn": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN of the event data store.

              " + } + }, + "Name": { + "target": "com.amazonaws.cloudtrail#EventDataStoreName", + "traits": { + "smithy.api#documentation": "

              The name of the event data store.

              " + } + }, + "Status": { + "target": "com.amazonaws.cloudtrail#EventDataStoreStatus", + "traits": { + "smithy.api#documentation": "

              The status of event data store creation.

              " + } + }, + "AdvancedEventSelectors": { + "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", + "traits": { + "smithy.api#documentation": "

              The advanced event selectors that were used to select the events for the data store.

              " + } + }, + "MultiRegionEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether the event data store collects events from all regions, or only from the region in which it was created.

              " + } + }, + "OrganizationEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether an event data store is collecting logged events for an organization in Organizations.

              " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.cloudtrail#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

              The retention period of an event data store, in days.

              " + } + }, + "TerminationProtectionEnabled": { + "target": "com.amazonaws.cloudtrail#TerminationProtectionEnabled", + "traits": { + "smithy.api#documentation": "

              Indicates whether termination protection is enabled for the event data store.

              " + } + }, + "TagsList": { + "target": "com.amazonaws.cloudtrail#TagsList" + }, + "CreatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp that shows when the event data store was created.

              " + } + }, + "UpdatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp that shows when an event data store was updated, if applicable. \n UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

              " + } + } + } + }, "com.amazonaws.cloudtrail#CreateTrail": { "type": "operation", "input": { @@ -393,6 +679,9 @@ { "target": "com.amazonaws.cloudtrail#CloudWatchLogsDeliveryUnavailableException" }, + { + "target": "com.amazonaws.cloudtrail#ConflictException" + }, { "target": "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException" }, @@ -675,6 +964,60 @@ "com.amazonaws.cloudtrail#Date": { "type": "timestamp" }, + "com.amazonaws.cloudtrail#DeleteEventDataStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#DeleteEventDataStoreRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#DeleteEventDataStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreTerminationProtectedException" + }, + { + "target": "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Disables the event data store specified by EventDataStore, which accepts an event data store ARN. \n After you run DeleteEventDataStore, the event data store is automatically deleted after a wait period of \n seven days. TerminationProtectionEnabled must be set to False on the event data store; this \n operation cannot work if TerminationProtectionEnabled is True.

              After you run DeleteEventDataStore on an event data store, you cannot run ListQueries, \n DescribeQuery, or GetQueryResults on queries that are using an event data store in a \n PENDING_DELETION state.
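Putting the two constraints together, a sketch (the ARN is a placeholder, and it assumes UpdateEventDataStore accepts TerminationProtectionEnabled, as in the service API) of turning off termination protection before deleting a store:

import {
  CloudTrailClient,
  DeleteEventDataStoreCommand,
  UpdateEventDataStoreCommand,
} from "@aws-sdk/client-cloudtrail";

const cloudtrail = new CloudTrailClient({});
const EventDataStore =
  "arn:aws:cloudtrail:us-east-1:111122223333:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE";

// DeleteEventDataStore fails with EventDataStoreTerminationProtectedException while protection is on.
await cloudtrail.send(new UpdateEventDataStoreCommand({ EventDataStore, TerminationProtectionEnabled: false }));
await cloudtrail.send(new DeleteEventDataStoreCommand({ EventDataStore }));
// The store moves to PENDING_DELETION and is removed after the seven-day wait period.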

              " + } + }, + "com.amazonaws.cloudtrail#DeleteEventDataStoreRequest": { + "type": "structure", + "members": { + "EventDataStore": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN (or the ID suffix of the ARN) of the event data store to delete.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.cloudtrail#DeleteEventDataStoreResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.cloudtrail#DeleteTrail": { "type": "operation", "input": { @@ -736,26 +1079,116 @@ "smithy.api#documentation": "

              Returns the objects or data listed below if successful. Otherwise, returns an error.

              " } }, - "com.amazonaws.cloudtrail#DescribeTrails": { + "com.amazonaws.cloudtrail#DescribeQuery": { "type": "operation", "input": { - "target": "com.amazonaws.cloudtrail#DescribeTrailsRequest" + "target": "com.amazonaws.cloudtrail#DescribeQueryRequest" }, "output": { - "target": "com.amazonaws.cloudtrail#DescribeTrailsResponse" + "target": "com.amazonaws.cloudtrail#DescribeQueryResponse" }, "errors": [ { - "target": "com.amazonaws.cloudtrail#InvalidTrailNameException" + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" }, { - "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" }, { - "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" - } - ], - "traits": { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#QueryIdNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Returns metadata about a query, including query run time in milliseconds, number of events scanned and matched, and query \n status. You must specify an ARN for EventDataStore, and a value for QueryID.
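As a sketch (ARN and query ID are placeholders), a caller can poll DescribeQuery until the query reaches a terminal state:

import { CloudTrailClient, DescribeQueryCommand } from "@aws-sdk/client-cloudtrail";

const cloudtrail = new CloudTrailClient({});
const params = {
  EventDataStore: "arn:aws:cloudtrail:us-east-1:111122223333:eventdatastore/EXAMPLE-f852-4e8f-8bd1-bcf6cEXAMPLE",
  QueryId: "EXAMPLEd-17a7-47c3-a9a1-eccf7EXAMPLE",
};

let status: string | undefined;
do {
  await new Promise((resolve) => setTimeout(resolve, 5000)); // wait between polls
  const { QueryStatus, QueryStatistics, ErrorMessage } = await cloudtrail.send(new DescribeQueryCommand(params));
  status = QueryStatus;
  if (status === "FAILED") throw new Error(ErrorMessage);
  console.log(status, QueryStatistics);
} while (status === "QUEUED" || status === "RUNNING");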

              ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cloudtrail#DescribeQueryRequest": { + "type": "structure", + "members": { + "EventDataStore": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN (or the ID suffix of the ARN) of an event data store on which the specified query was run.

              ", + "smithy.api#required": {} + } + }, + "QueryId": { + "target": "com.amazonaws.cloudtrail#UUID", + "traits": { + "smithy.api#documentation": "

              The query ID.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.cloudtrail#DescribeQueryResponse": { + "type": "structure", + "members": { + "QueryId": { + "target": "com.amazonaws.cloudtrail#UUID", + "traits": { + "smithy.api#documentation": "

              The ID of the query.

              " + } + }, + "QueryString": { + "target": "com.amazonaws.cloudtrail#QueryStatement", + "traits": { + "smithy.api#documentation": "

              The SQL code of a query.

              " + } + }, + "QueryStatus": { + "target": "com.amazonaws.cloudtrail#QueryStatus", + "traits": { + "smithy.api#documentation": "

              The status of a query. Values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, or CANCELLED\n

              " + } + }, + "QueryStatistics": { + "target": "com.amazonaws.cloudtrail#QueryStatisticsForDescribeQuery", + "traits": { + "smithy.api#documentation": "

              Metadata about a query, including the number of events that were matched, the total number of events scanned, the query run time \n in milliseconds, and the query's creation time.

              " + } + }, + "ErrorMessage": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              The error message returned if a query failed.

              " + } + } + } + }, + "com.amazonaws.cloudtrail#DescribeTrails": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#DescribeTrailsRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#DescribeTrailsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#InvalidTrailNameException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { "smithy.api#documentation": "

              Retrieves settings for one or more trails associated with the current region for your account.

              ", "smithy.api#idempotent": {} } @@ -795,7 +1228,14 @@ } }, "com.amazonaws.cloudtrail#ErrorMessage": { - "type": "string" + "type": "string", + "traits": { + "smithy.api#length": { + "min": 4, + "max": 1000 + }, + "smithy.api#pattern": ".*" + } }, "com.amazonaws.cloudtrail#Event": { "type": "structure", @@ -870,6 +1310,219 @@ ] } }, + "com.amazonaws.cloudtrail#EventDataStore": { + "type": "structure", + "members": { + "EventDataStoreArn": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN of the event data store.

              " + } + }, + "Name": { + "target": "com.amazonaws.cloudtrail#EventDataStoreName", + "traits": { + "smithy.api#documentation": "

              The name of the event data store.

              " + } + }, + "TerminationProtectionEnabled": { + "target": "com.amazonaws.cloudtrail#TerminationProtectionEnabled", + "traits": { + "smithy.api#documentation": "

              Indicates whether the event data store is protected from termination.

              " + } + }, + "Status": { + "target": "com.amazonaws.cloudtrail#EventDataStoreStatus", + "traits": { + "smithy.api#documentation": "

              The status of an event data store. Values are ENABLED and PENDING_DELETION.

              " + } + }, + "AdvancedEventSelectors": { + "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", + "traits": { + "smithy.api#documentation": "

              The advanced event selectors that were used to select events for the data store.

              " + } + }, + "MultiRegionEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

              " + } + }, + "OrganizationEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates that an event data store is collecting logged events for an organization.

              " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.cloudtrail#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

              The retention period, in days.

              " + } + }, + "CreatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp of the event data store's creation.

              " + } + }, + "UpdatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp showing when an event data store was updated, if applicable. UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              A storage lake of event data against which you can run complex SQL-based queries. An event data store can include events \n that you have logged on your account from the last 90 to 2555 days \n (about three months to seven years). To select events for an event data store, \n use advanced event selectors.

              " + } + }, + "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "EventDataStoreARNInvalid", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The specified event data store ARN is not valid or does not map to an event data store in your account.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cloudtrail#EventDataStoreAlreadyExistsException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "EventDataStoreAlreadyExists", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              An event data store with that name already exists.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cloudtrail#EventDataStoreArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9._/\\-:]+$" + } + }, + "com.amazonaws.cloudtrail#EventDataStoreMaxLimitExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "EventDataStoreMaxLimitExceeded", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              Your account has used the maximum number of event data stores.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cloudtrail#EventDataStoreName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9._\\-]+$" + } + }, + "com.amazonaws.cloudtrail#EventDataStoreNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "EventDataStoreNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "

              The specified event data store was not found.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.cloudtrail#EventDataStoreStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATED", + "name": "CREATED" + }, + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "PENDING_DELETION", + "name": "PENDING_DELETION" + } + ] + } + }, + "com.amazonaws.cloudtrail#EventDataStoreTerminationProtectedException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "EventDataStoreTerminationProtectedException", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The event data store cannot be deleted because termination protection is enabled for it.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cloudtrail#EventDataStores": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudtrail#EventDataStore" + } + }, "com.amazonaws.cloudtrail#EventSelector": { "type": "structure", "members": { @@ -920,6 +1573,113 @@ "target": "com.amazonaws.cloudtrail#String" } }, + "com.amazonaws.cloudtrail#GetEventDataStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#GetEventDataStoreRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#GetEventDataStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Returns information about an event data store specified as either an ARN or the ID portion of the ARN.

              ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cloudtrail#GetEventDataStoreRequest": { + "type": "structure", + "members": { + "EventDataStore": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN (or ID suffix of the ARN) of the event data store about which you want information.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.cloudtrail#GetEventDataStoreResponse": { + "type": "structure", + "members": { + "EventDataStoreArn": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The event data store Amazon Resource Name (ARN).

              " + } + }, + "Name": { + "target": "com.amazonaws.cloudtrail#EventDataStoreName", + "traits": { + "smithy.api#documentation": "

              The name of the event data store.

              " + } + }, + "Status": { + "target": "com.amazonaws.cloudtrail#EventDataStoreStatus", + "traits": { + "smithy.api#documentation": "

              The status of an event data store. Values can be ENABLED and PENDING_DELETION.

              " + } + }, + "AdvancedEventSelectors": { + "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", + "traits": { + "smithy.api#documentation": "

              The advanced event selectors used to select events for the data store.

              " + } + }, + "MultiRegionEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

              " + } + }, + "OrganizationEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether an event data store is collecting logged events for an organization in Organizations.

              " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.cloudtrail#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

              The retention period of the event data store, in days.

              " + } + }, + "TerminationProtectionEnabled": { + "target": "com.amazonaws.cloudtrail#TerminationProtectionEnabled", + "traits": { + "smithy.api#documentation": "

              Indicates that termination protection is enabled.

              " + } + }, + "CreatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp of the event data store's creation.

              " + } + }, + "UpdatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              Shows the time that an event data store was updated, if applicable. UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

              " + } + } + } + }, "com.amazonaws.cloudtrail#GetEventSelectors": { "type": "operation", "input": { @@ -1036,7 +1796,118 @@ "InsightSelectors": { "target": "com.amazonaws.cloudtrail#InsightSelectors", "traits": { - "smithy.api#documentation": "

              A JSON string that contains the insight types you want to log on a trail. In this release, only ApiCallRateInsight is supported as an insight type.

              " + "smithy.api#documentation": "

              A JSON string that contains the insight types you want to log on a trail. In this release, ApiErrorRateInsight and \n ApiCallRateInsight are supported as insight types.

              " + } + } + } + }, + "com.amazonaws.cloudtrail#GetQueryResults": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#GetQueryResultsRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#GetQueryResultsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidMaxResultsException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidNextTokenException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#QueryIdNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Gets event data results of a query. You must specify the QueryID value returned by the StartQuery \n operation, and an ARN for EventDataStore.
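              A minimal usage sketch of this paginated operation through the regenerated @aws-sdk/client-cloudtrail client. The GetQueryResultsCommand name is assumed from the Command naming convention used throughout this PR, and the region, event data store ARN, and query ID are placeholders:

              import { CloudTrailClient, GetQueryResultsCommand } from "@aws-sdk/client-cloudtrail";

              const client = new CloudTrailClient({ region: "us-east-1" });

              // Page through every result row of a finished CloudTrail Lake query.
              async function printAllQueryResults(eventDataStoreArn: string, queryId: string): Promise<void> {
                let nextToken: string | undefined;
                do {
                  const page = await client.send(
                    new GetQueryResultsCommand({
                      EventDataStore: eventDataStoreArn,
                      QueryId: queryId,
                      MaxQueryResults: 100,
                      NextToken: nextToken,
                    })
                  );
                  for (const row of page.QueryResultRows ?? []) {
                    console.log(JSON.stringify(row));
                  }
                  nextToken = page.NextToken;
                } while (nextToken);
              }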

              ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken" + } + } + }, + "com.amazonaws.cloudtrail#GetQueryResultsRequest": { + "type": "structure", + "members": { + "EventDataStore": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN (or ID suffix of the ARN) of the event data store against which the query was run.

              ", + "smithy.api#required": {} + } + }, + "QueryId": { + "target": "com.amazonaws.cloudtrail#UUID", + "traits": { + "smithy.api#documentation": "

              The ID of the query for which you want to get results.

              ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.cloudtrail#PaginationToken", + "traits": { + "smithy.api#documentation": "

              A token you can use to get the next page of query results.

              " + } + }, + "MaxQueryResults": { + "target": "com.amazonaws.cloudtrail#MaxQueryResults", + "traits": { + "smithy.api#documentation": "

              The maximum number of query results to display on a single page.

              " + } + } + } + }, + "com.amazonaws.cloudtrail#GetQueryResultsResponse": { + "type": "structure", + "members": { + "QueryStatus": { + "target": "com.amazonaws.cloudtrail#QueryStatus", + "traits": { + "smithy.api#documentation": "

              The status of the query. Values include QUEUED, RUNNING, FINISHED, FAILED, \n or CANCELLED.

              " + } + }, + "QueryStatistics": { + "target": "com.amazonaws.cloudtrail#QueryStatistics", + "traits": { + "smithy.api#documentation": "

              Shows the count of query results.

              " + } + }, + "QueryResultRows": { + "target": "com.amazonaws.cloudtrail#QueryResultRows", + "traits": { + "smithy.api#documentation": "

              Contains the individual event results of the query.

              " + } + }, + "NextToken": { + "target": "com.amazonaws.cloudtrail#PaginationToken", + "traits": { + "smithy.api#documentation": "

              A token you can use to get the next page of query results.

              " + } + }, + "ErrorMessage": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              The error message returned if a query failed.

              " } } } @@ -1240,6 +2111,46 @@ "smithy.api#documentation": "

              Returns the objects or data listed below if successful. Otherwise, returns an error.

              " } }, + "com.amazonaws.cloudtrail#InactiveEventDataStoreException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InactiveEventDataStore", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The event data store against which you ran your query is inactive.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cloudtrail#InactiveQueryException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InactiveQuery", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The specified query cannot be canceled because it is in the FINISHED, FAILED, or \n CANCELLED state.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.cloudtrail#InsightNotEnabledException": { "type": "structure", "members": { @@ -1266,7 +2177,7 @@ "InsightType": { "target": "com.amazonaws.cloudtrail#InsightType", "traits": { - "smithy.api#documentation": "

              The type of Insights events to log on a trail. The valid Insights type in this release is ApiCallRateInsight.

              " + "smithy.api#documentation": "

              The type of insights to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid insight types.

              " } } }, @@ -1375,6 +2286,12 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.cloudtrail#Integer": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, "com.amazonaws.cloudtrail#InvalidCloudWatchLogsLogGroupArnException": { "type": "structure", "members": { @@ -1415,6 +2332,26 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.cloudtrail#InvalidDateRangeException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidDateRange", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The specified date range for the query is not valid. For more information \n about writing a query, see Create \n or edit a query in the CloudTrail User Guide.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.cloudtrail#InvalidEventCategoryException": { "type": "structure", "members": { @@ -1435,6 +2372,26 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.cloudtrail#InvalidEventDataStoreStatusException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidEventDataStoreStatus", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The event data store is not in a status that supports the operation.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.cloudtrail#InvalidEventSelectorsException": { "type": "structure", "members": { @@ -1595,6 +2552,66 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.cloudtrail#InvalidParameterException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidParameter", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The request includes a parameter that is not valid.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cloudtrail#InvalidQueryStatementException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidQueryStatement", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The query that was submitted has validation errors, or uses incorrect syntax or unsupported keywords. For more information \n about writing a query, see Create \n or edit a query in the CloudTrail User Guide.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cloudtrail#InvalidQueryStatusException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "InvalidQueryStatus", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              The query status is not valid for the operation.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.cloudtrail#InvalidS3BucketNameException": { "type": "structure", "members": { @@ -1796,6 +2813,82 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.cloudtrail#ListEventDataStores": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#ListEventDataStoresRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#ListEventDataStoresResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#InvalidMaxResultsException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidNextTokenException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Returns information about all event data stores in the account, in the current region.
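              A sketch of paging through this operation with the regenerated client. ListEventDataStoresCommand follows the PR's Command naming convention, and the Name, Status, and RetentionPeriod fields are taken from the model's EventDataStore structure, which is outside this hunk:

              import { CloudTrailClient, ListEventDataStoresCommand } from "@aws-sdk/client-cloudtrail";

              const client = new CloudTrailClient({ region: "us-east-1" });

              // Print the name, status, and retention period of each event data store in this region.
              async function listEventDataStores(): Promise<void> {
                let nextToken: string | undefined;
                do {
                  const page = await client.send(
                    new ListEventDataStoresCommand({ MaxResults: 10, NextToken: nextToken })
                  );
                  for (const store of page.EventDataStores ?? []) {
                    console.log(store.Name, store.Status, store.RetentionPeriod);
                  }
                  nextToken = page.NextToken;
                } while (nextToken);
              }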

              ", + "smithy.api#idempotent": {}, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.cloudtrail#ListEventDataStoresMaxResultsCount": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.cloudtrail#ListEventDataStoresRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.cloudtrail#PaginationToken", + "traits": { + "smithy.api#documentation": "

              A token you can use to get the next page of event data store results.

              " + } + }, + "MaxResults": { + "target": "com.amazonaws.cloudtrail#ListEventDataStoresMaxResultsCount", + "traits": { + "smithy.api#documentation": "

              The maximum number of event data stores to display on a single page.

              " + } + } + } + }, + "com.amazonaws.cloudtrail#ListEventDataStoresResponse": { + "type": "structure", + "members": { + "EventDataStores": { + "target": "com.amazonaws.cloudtrail#EventDataStores", + "traits": { + "smithy.api#documentation": "

              Contains information about event data stores in the account, in the current region.

              " + } + }, + "NextToken": { + "target": "com.amazonaws.cloudtrail#PaginationToken", + "traits": { + "smithy.api#documentation": "

              A token you can use to get the next page of results.

              " + } + } + } + }, "com.amazonaws.cloudtrail#ListPublicKeys": { "type": "operation", "input": { @@ -1849,29 +2942,148 @@ "smithy.api#documentation": "

              Reserved for future use.

              " } } - }, - "traits": { - "smithy.api#documentation": "

              Requests the public keys for a specified time range.

              " + }, + "traits": { + "smithy.api#documentation": "

              Requests the public keys for a specified time range.

              " + } + }, + "com.amazonaws.cloudtrail#ListPublicKeysResponse": { + "type": "structure", + "members": { + "PublicKeyList": { + "target": "com.amazonaws.cloudtrail#PublicKeyList", + "traits": { + "smithy.api#documentation": "

              Contains an array of PublicKey objects.

              \n \n

              The returned public keys may have validity time ranges that overlap.

              \n
              " + } + }, + "NextToken": { + "target": "com.amazonaws.cloudtrail#String", + "traits": { + "smithy.api#documentation": "

              Reserved for future use.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              Returns the objects or data listed below if successful. Otherwise, returns an error.

              " + } + }, + "com.amazonaws.cloudtrail#ListQueries": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#ListQueriesRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#ListQueriesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidDateRangeException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidMaxResultsException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidNextTokenException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidQueryStatusException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Returns a list of queries and query statuses for the past seven days. You must specify an ARN value for \n EventDataStore. Optionally, to shorten the list of results, you can specify a time range, \n formatted as timestamps, by adding StartTime and EndTime parameters, and a \n QueryStatus value. Valid values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, or CANCELLED.
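              A sketch of the optional time-range and status filters described above, assuming the regenerated client exposes ListQueriesCommand per the Command naming convention; the ARN and region are placeholders:

              import { CloudTrailClient, ListQueriesCommand } from "@aws-sdk/client-cloudtrail";

              const client = new CloudTrailClient({ region: "us-east-1" });

              // List FINISHED queries run against one event data store during the last 24 hours.
              async function listRecentFinishedQueries(eventDataStoreArn: string): Promise<void> {
                const response = await client.send(
                  new ListQueriesCommand({
                    EventDataStore: eventDataStoreArn,
                    QueryStatus: "FINISHED",
                    StartTime: new Date(Date.now() - 24 * 60 * 60 * 1000),
                    EndTime: new Date(),
                    MaxResults: 100,
                  })
                );
                for (const query of response.Queries ?? []) {
                  console.log(query.QueryId, query.QueryStatus, query.CreationTime);
                }
              }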

              ", + "smithy.api#idempotent": {}, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.cloudtrail#ListQueriesMaxResultsCount": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.cloudtrail#ListQueriesRequest": { + "type": "structure", + "members": { + "EventDataStore": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN (or the ID suffix of the ARN) of an event data store on which queries were run.

              ", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.cloudtrail#PaginationToken", + "traits": { + "smithy.api#documentation": "

              A token you can use to get the next page of results.

              " + } + }, + "MaxResults": { + "target": "com.amazonaws.cloudtrail#ListQueriesMaxResultsCount", + "traits": { + "smithy.api#documentation": "

              The maximum number of queries to show on a page.

              " + } + }, + "StartTime": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              Use with EndTime to bound a ListQueries request, and limit its results to only those queries run \n within a specified time period.

              " + } + }, + "EndTime": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              Use with StartTime to bound a ListQueries request, and limit its results to only those queries run \n within a specified time period.

              " + } + }, + "QueryStatus": { + "target": "com.amazonaws.cloudtrail#QueryStatus", + "traits": { + "smithy.api#documentation": "

              The status of queries that you want to return in results. Valid values for QueryStatus include QUEUED, RUNNING, \n FINISHED, FAILED, or CANCELLED.

              " + } + } } }, - "com.amazonaws.cloudtrail#ListPublicKeysResponse": { + "com.amazonaws.cloudtrail#ListQueriesResponse": { "type": "structure", "members": { - "PublicKeyList": { - "target": "com.amazonaws.cloudtrail#PublicKeyList", + "Queries": { + "target": "com.amazonaws.cloudtrail#Queries", "traits": { - "smithy.api#documentation": "

              Contains an array of PublicKey objects.

              \n \n

              The returned public keys may have validity time ranges that overlap.

              \n
              " + "smithy.api#documentation": "

              Lists matching query results, and shows query ID, status, and creation time of each query.

              " } }, "NextToken": { - "target": "com.amazonaws.cloudtrail#String", + "target": "com.amazonaws.cloudtrail#PaginationToken", "traits": { - "smithy.api#documentation": "

              Reserved for future use.

              " + "smithy.api#documentation": "

              A token you can use to get the next page of results.

              " } } - }, - "traits": { - "smithy.api#documentation": "

              Returns the objects or data listed below if successful. Otherwise, returns an error.

              " } }, "com.amazonaws.cloudtrail#ListTags": { @@ -1886,6 +3098,12 @@ { "target": "com.amazonaws.cloudtrail#CloudTrailARNInvalidException" }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, { "target": "com.amazonaws.cloudtrail#InvalidTokenException" }, @@ -2010,6 +3228,12 @@ } } }, + "com.amazonaws.cloudtrail#Long": { + "type": "long", + "traits": { + "smithy.api#box": {} + } + }, "com.amazonaws.cloudtrail#LookupAttribute": { "type": "structure", "members": { @@ -2183,6 +3407,36 @@ "smithy.api#documentation": "

              Contains a response to a LookupEvents action.

              " } }, + "com.amazonaws.cloudtrail#MaxConcurrentQueriesException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "MaxConcurrentQueries", + "httpResponseCode": 429 + }, + "smithy.api#documentation": "

              You are already running the maximum number of concurrent queries. Wait a minute for some queries to finish, and then \n run the query again.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.cloudtrail#MaxQueryResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, "com.amazonaws.cloudtrail#MaxResults": { "type": "integer", "traits": { @@ -2317,6 +3571,16 @@ "smithy.api#httpError": 404 } }, + "com.amazonaws.cloudtrail#PaginationToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 4, + "max": 1000 + }, + "smithy.api#pattern": ".*" + } + }, "com.amazonaws.cloudtrail#PublicKey": { "type": "structure", "members": { @@ -2485,7 +3749,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Lets you enable Insights event logging by specifying the Insights\n selectors that you want to enable on an existing trail. You also use\n PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. \n The valid Insights event type in this release is ApiCallRateInsight.

              ", + "smithy.api#documentation": "

              Lets you enable Insights event logging by specifying the Insights\n selectors that you want to enable on an existing trail. You also use\n PutInsightSelectors to turn off Insights event logging, by passing an empty list of insight types. \n The valid Insights event types in this release are ApiErrorRateInsight and ApiCallRateInsight.
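              A sketch of enabling both insight types on an existing trail through the regenerated client. PutInsightSelectorsCommand follows the Command naming convention, and the TrailName member comes from the existing request shape, which is not part of this hunk:

              import { CloudTrailClient, PutInsightSelectorsCommand } from "@aws-sdk/client-cloudtrail";

              const client = new CloudTrailClient({ region: "us-east-1" });

              // Enable both supported Insights types on an existing trail.
              // Passing InsightSelectors: [] instead turns Insights event logging off again.
              async function enableInsights(trailName: string): Promise<void> {
                await client.send(
                  new PutInsightSelectorsCommand({
                    TrailName: trailName,
                    InsightSelectors: [
                      { InsightType: "ApiCallRateInsight" },
                      { InsightType: "ApiErrorRateInsight" },
                    ],
                  })
                );
              }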

              ", "smithy.api#idempotent": {} } }, @@ -2502,7 +3766,7 @@ "InsightSelectors": { "target": "com.amazonaws.cloudtrail#InsightSelectors", "traits": { - "smithy.api#documentation": "

              A JSON string that contains the Insights types that you want to log on a trail. The valid Insights type in this release is ApiCallRateInsight.

              ", + "smithy.api#documentation": "

              A JSON string that contains the insight types you want to log on a trail. ApiCallRateInsight and ApiErrorRateInsight are valid insight types.

              ", "smithy.api#required": {} } } @@ -2520,9 +3784,177 @@ "InsightSelectors": { "target": "com.amazonaws.cloudtrail#InsightSelectors", "traits": { - "smithy.api#documentation": "

              A JSON string that contains the Insights event types that you want to log on a trail. The valid Insights type in this release is ApiCallRateInsight.

              " + "smithy.api#documentation": "

              A JSON string that contains the Insights event types that you want to log on a trail. The valid Insights types in this release are \n ApiErrorRateInsight and ApiCallRateInsight.

              " + } + } + } + }, + "com.amazonaws.cloudtrail#Queries": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudtrail#Query" + } + }, + "com.amazonaws.cloudtrail#Query": { + "type": "structure", + "members": { + "QueryId": { + "target": "com.amazonaws.cloudtrail#UUID", + "traits": { + "smithy.api#documentation": "

              The ID of a query.

              " + } + }, + "QueryStatus": { + "target": "com.amazonaws.cloudtrail#QueryStatus", + "traits": { + "smithy.api#documentation": "

              The status of the query. This can be QUEUED, RUNNING, FINISHED, FAILED, \n or CANCELLED.

              " + } + }, + "CreationTime": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The creation time of a query.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              Describes a query submitted to a CloudTrail Lake event data store, including the query ID, status, and creation time.

              " + } + }, + "com.amazonaws.cloudtrail#QueryIdNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "QueryIdNotFound", + "httpResponseCode": 404 + }, + "smithy.api#documentation": "

              The query ID does not exist or does not map to a query.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.cloudtrail#QueryResultColumn": { + "type": "map", + "key": { + "target": "com.amazonaws.cloudtrail#QueryResultKey" + }, + "value": { + "target": "com.amazonaws.cloudtrail#QueryResultValue" + } + }, + "com.amazonaws.cloudtrail#QueryResultKey": { + "type": "string" + }, + "com.amazonaws.cloudtrail#QueryResultRow": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudtrail#QueryResultColumn" + } + }, + "com.amazonaws.cloudtrail#QueryResultRows": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudtrail#QueryResultRow" + } + }, + "com.amazonaws.cloudtrail#QueryResultValue": { + "type": "string" + }, + "com.amazonaws.cloudtrail#QueryStatement": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10000 + }, + "smithy.api#pattern": "^(?s)" + } + }, + "com.amazonaws.cloudtrail#QueryStatistics": { + "type": "structure", + "members": { + "ResultsCount": { + "target": "com.amazonaws.cloudtrail#Integer", + "traits": { + "smithy.api#documentation": "

              The number of results returned.

              " + } + }, + "TotalResultsCount": { + "target": "com.amazonaws.cloudtrail#Integer", + "traits": { + "smithy.api#documentation": "

              The total number of results returned by a query.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              Metadata about a query, such as the number of results.

              " + } + }, + "com.amazonaws.cloudtrail#QueryStatisticsForDescribeQuery": { + "type": "structure", + "members": { + "EventsMatched": { + "target": "com.amazonaws.cloudtrail#Long", + "traits": { + "smithy.api#documentation": "

              The number of events that matched a query.

              " + } + }, + "EventsScanned": { + "target": "com.amazonaws.cloudtrail#Long", + "traits": { + "smithy.api#documentation": "

              The number of events that the query scanned in the event data store.

              " + } + }, + "ExecutionTimeInMillis": { + "target": "com.amazonaws.cloudtrail#Integer", + "traits": { + "smithy.api#documentation": "

              The query's run time, in milliseconds.

              " + } + }, + "CreationTime": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The creation time of the query.

              " } } + }, + "traits": { + "smithy.api#documentation": "

              Gets metadata about a query, including the number of events that were matched, the total number of events scanned, the query run time \n in milliseconds, and the query's creation time.

              " + } + }, + "com.amazonaws.cloudtrail#QueryStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "QUEUED", + "name": "QUEUED" + }, + { + "value": "RUNNING", + "name": "RUNNING" + }, + { + "value": "FINISHED", + "name": "FINISHED" + }, + { + "value": "FAILED", + "name": "FAILED" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + } + ] } }, "com.amazonaws.cloudtrail#ReadWriteType": { @@ -2556,6 +3988,12 @@ { "target": "com.amazonaws.cloudtrail#CloudTrailARNInvalidException" }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, { "target": "com.amazonaws.cloudtrail#InvalidTagParameterException" }, @@ -2596,7 +4034,8 @@ "TagsList": { "target": "com.amazonaws.cloudtrail#TagsList", "traits": { - "smithy.api#documentation": "

              Specifies a list of tags to be removed.

              " + "smithy.api#documentation": "

              Specifies a list of tags to be removed.

              ", + "smithy.api#required": {} } } }, @@ -2666,50 +4105,187 @@ "smithy.api#httpError": 400 } }, - "com.amazonaws.cloudtrail#ResourceTag": { + "com.amazonaws.cloudtrail#ResourceTag": { + "type": "structure", + "members": { + "ResourceId": { + "target": "com.amazonaws.cloudtrail#String", + "traits": { + "smithy.api#documentation": "

              Specifies the ARN of the resource.

              " + } + }, + "TagsList": { + "target": "com.amazonaws.cloudtrail#TagsList", + "traits": { + "smithy.api#documentation": "

              A list of tags.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              A resource tag.

              " + } + }, + "com.amazonaws.cloudtrail#ResourceTagList": { + "type": "list", + "member": { + "target": "com.amazonaws.cloudtrail#ResourceTag" + } + }, + "com.amazonaws.cloudtrail#ResourceTypeNotSupportedException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.cloudtrail#ErrorMessage", + "traits": { + "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + } + } + }, + "traits": { + "aws.protocols#awsQueryError": { + "code": "ResourceTypeNotSupported", + "httpResponseCode": 400 + }, + "smithy.api#documentation": "

              This exception is thrown when the specified resource type is not supported by CloudTrail.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.cloudtrail#RestoreEventDataStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#RestoreEventDataStoreRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#RestoreEventDataStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#CloudTrailAccessNotEnabledException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreMaxLimitExceededException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidEventDataStoreStatusException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#OrganizationNotInAllFeaturesModeException" + }, + { + "target": "com.amazonaws.cloudtrail#OrganizationsNotInUseException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Restores a deleted event data store specified by EventDataStore, which accepts an event data store ARN. \n You can only restore a deleted event data store within the seven-day wait period after deletion. Restoring an event data store \n can take several minutes, depending on the size of the event data store.
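              A sketch of the restore call with the regenerated client, assuming codegen emits RestoreEventDataStoreCommand from this shape; the ARN is a placeholder:

              import { CloudTrailClient, RestoreEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

              const client = new CloudTrailClient({ region: "us-east-1" });

              // Restore an event data store that is still inside the seven-day wait period after deletion.
              // The restore itself can take several minutes; the returned Status shows the current state.
              async function restoreEventDataStore(eventDataStoreArn: string): Promise<string | undefined> {
                const response = await client.send(
                  new RestoreEventDataStoreCommand({ EventDataStore: eventDataStoreArn })
                );
                return response.Status;
              }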

              " + } + }, + "com.amazonaws.cloudtrail#RestoreEventDataStoreRequest": { + "type": "structure", + "members": { + "EventDataStore": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN (or the ID suffix of the ARN) of the event data store that you want to restore.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.cloudtrail#RestoreEventDataStoreResponse": { "type": "structure", "members": { - "ResourceId": { - "target": "com.amazonaws.cloudtrail#String", + "EventDataStoreArn": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", "traits": { - "smithy.api#documentation": "

              Specifies the ARN of the resource.

              " + "smithy.api#documentation": "

              The event data store ARN.

              " } }, - "TagsList": { - "target": "com.amazonaws.cloudtrail#TagsList", + "Name": { + "target": "com.amazonaws.cloudtrail#EventDataStoreName", "traits": { - "smithy.api#documentation": "

              A list of tags.

              " + "smithy.api#documentation": "

              The name of the event data store.

              " } - } - }, - "traits": { - "smithy.api#documentation": "

              A resource tag.

              " - } - }, - "com.amazonaws.cloudtrail#ResourceTagList": { - "type": "list", - "member": { - "target": "com.amazonaws.cloudtrail#ResourceTag" - } - }, - "com.amazonaws.cloudtrail#ResourceTypeNotSupportedException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.cloudtrail#ErrorMessage", + }, + "Status": { + "target": "com.amazonaws.cloudtrail#EventDataStoreStatus", "traits": { - "smithy.api#documentation": "

              Brief description of the exception returned by the request.

              " + "smithy.api#documentation": "

              The status of the event data store.

              " + } + }, + "AdvancedEventSelectors": { + "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", + "traits": { + "smithy.api#documentation": "

              The advanced event selectors that were used to select events.

              " + } + }, + "MultiRegionEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether the event data store is collecting events from all regions, or only from the region in which the event data \n store was created.

              " + } + }, + "OrganizationEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether an event data store is collecting logged events for an organization in Organizations.

              " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.cloudtrail#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

              The retention period, in days.

              " + } + }, + "TerminationProtectionEnabled": { + "target": "com.amazonaws.cloudtrail#TerminationProtectionEnabled", + "traits": { + "smithy.api#documentation": "

              Indicates that termination protection is enabled and the event data store cannot be automatically deleted.

              " + } + }, + "CreatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp of an event data store's creation.

              " + } + }, + "UpdatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp that shows when an event data store was updated, if applicable. \n UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

              " } } - }, + } + }, + "com.amazonaws.cloudtrail#RetentionPeriod": { + "type": "integer", "traits": { - "aws.protocols#awsQueryError": { - "code": "ResourceTypeNotSupported", - "httpResponseCode": 400 - }, - "smithy.api#documentation": "

              This exception is thrown when the specified resource type is not supported by CloudTrail.

              ", - "smithy.api#error": "client", - "smithy.api#httpError": 400 + "smithy.api#box": {}, + "smithy.api#range": { + "min": 7, + "max": 2555 + } } }, "com.amazonaws.cloudtrail#S3BucketDoesNotExistException": { @@ -2810,6 +4386,68 @@ "smithy.api#documentation": "

              Returns the objects or data listed below if successful. Otherwise, returns an error.

              " } }, + "com.amazonaws.cloudtrail#StartQuery": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#StartQueryRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#StartQueryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidQueryStatementException" + }, + { + "target": "com.amazonaws.cloudtrail#MaxConcurrentQueriesException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Starts a CloudTrail Lake query. The required QueryStatement \n parameter provides your SQL query, enclosed in single quotation marks.
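              A sketch of starting a query with the regenerated client. StartQueryCommand follows the Command naming convention, and the sample SQL assumes CloudTrail Lake's documented SELECT ... FROM <event data store ID> form; the event data store ID is a placeholder:

              import { CloudTrailClient, StartQueryCommand } from "@aws-sdk/client-cloudtrail";

              const client = new CloudTrailClient({ region: "us-east-1" });

              // Start a CloudTrail Lake query; the event data store ID goes in the FROM clause of the SQL.
              async function startQuery(eventDataStoreId: string): Promise<string | undefined> {
                const response = await client.send(
                  new StartQueryCommand({
                    QueryStatement: `SELECT eventID, eventName FROM ${eventDataStoreId} ` +
                      `WHERE eventTime > '2021-12-01 00:00:00' LIMIT 10`,
                  })
                );
                return response.QueryId; // Pass this ID to GetQueryResults or DescribeQuery.
              }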

              ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cloudtrail#StartQueryRequest": { + "type": "structure", + "members": { + "QueryStatement": { + "target": "com.amazonaws.cloudtrail#QueryStatement", + "traits": { + "smithy.api#documentation": "

              The SQL code of your query.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.cloudtrail#StartQueryResponse": { + "type": "structure", + "members": { + "QueryId": { + "target": "com.amazonaws.cloudtrail#UUID", + "traits": { + "smithy.api#documentation": "

              The ID of the started query.

              " + } + } + } + }, "com.amazonaws.cloudtrail#StopLogging": { "type": "operation", "input": { @@ -2875,14 +4513,14 @@ "type": "structure", "members": { "Key": { - "target": "com.amazonaws.cloudtrail#String", + "target": "com.amazonaws.cloudtrail#TagKey", "traits": { "smithy.api#documentation": "

              The key in a key-value pair. The key must be no longer than 128 Unicode characters. \n The key must be unique for the resource to which it applies.

              ", "smithy.api#required": {} } }, "Value": { - "target": "com.amazonaws.cloudtrail#String", + "target": "com.amazonaws.cloudtrail#TagValue", "traits": { "smithy.api#documentation": "

              The value in a key-value pair of a tag. The value must be no longer than 256 Unicode characters.

              " } @@ -2892,6 +4530,24 @@ "smithy.api#documentation": "

              A custom key-value pair associated with a resource such as a CloudTrail trail.

              " } }, + "com.amazonaws.cloudtrail#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.cloudtrail#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, "com.amazonaws.cloudtrail#TagsLimitExceededException": { "type": "structure", "members": { @@ -2918,7 +4574,17 @@ "target": "com.amazonaws.cloudtrail#Tag" }, "traits": { - "smithy.api#documentation": "

              A list of tags.

              " + "smithy.api#documentation": "

              A list of tags.

              ", + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.cloudtrail#TerminationProtectionEnabled": { + "type": "boolean", + "traits": { + "smithy.api#box": {} } }, "com.amazonaws.cloudtrail#Trail": { @@ -3130,6 +4796,16 @@ "target": "com.amazonaws.cloudtrail#TrailInfo" } }, + "com.amazonaws.cloudtrail#UUID": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[a-f0-9\\-]+$" + } + }, "com.amazonaws.cloudtrail#UnsupportedOperationException": { "type": "structure", "members": { @@ -3150,6 +4826,167 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.cloudtrail#UpdateEventDataStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.cloudtrail#UpdateEventDataStoreRequest" + }, + "output": { + "target": "com.amazonaws.cloudtrail#UpdateEventDataStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.cloudtrail#CloudTrailAccessNotEnabledException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreARNInvalidException" + }, + { + "target": "com.amazonaws.cloudtrail#EventDataStoreNotFoundException" + }, + { + "target": "com.amazonaws.cloudtrail#InactiveEventDataStoreException" + }, + { + "target": "com.amazonaws.cloudtrail#InsufficientDependencyServiceAccessPermissionException" + }, + { + "target": "com.amazonaws.cloudtrail#InvalidParameterException" + }, + { + "target": "com.amazonaws.cloudtrail#NotOrganizationMasterAccountException" + }, + { + "target": "com.amazonaws.cloudtrail#OperationNotPermittedException" + }, + { + "target": "com.amazonaws.cloudtrail#OrganizationNotInAllFeaturesModeException" + }, + { + "target": "com.amazonaws.cloudtrail#OrganizationsNotInUseException" + }, + { + "target": "com.amazonaws.cloudtrail#UnsupportedOperationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Updates an event data store. The required EventDataStore value is an ARN or the ID portion of the ARN. \n Other parameters are optional, but at least one optional parameter must be specified, or CloudTrail throws an error. \n RetentionPeriod is in days, and valid values are integers between 90 and 2555. \n By default, TerminationProtection is enabled. AdvancedEventSelectors includes or excludes management \n and data events in your event data store; for more information about AdvancedEventSelectors, see \n PutEventSelectorsRequest$AdvancedEventSelectors.
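              A sketch of the "EventDataStore plus at least one optional parameter" rule described above, assuming the regenerated client exposes UpdateEventDataStoreCommand; the ARN is a placeholder:

              import { CloudTrailClient, UpdateEventDataStoreCommand } from "@aws-sdk/client-cloudtrail";

              const client = new CloudTrailClient({ region: "us-east-1" });

              // Change only the retention period; EventDataStore plus at least one optional parameter is required.
              async function setRetentionPeriod(eventDataStoreArn: string, days: number): Promise<void> {
                const response = await client.send(
                  new UpdateEventDataStoreCommand({
                    EventDataStore: eventDataStoreArn,
                    RetentionPeriod: days,
                  })
                );
                console.log(response.Name, response.RetentionPeriod, response.TerminationProtectionEnabled);
              }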

              ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.cloudtrail#UpdateEventDataStoreRequest": { + "type": "structure", + "members": { + "EventDataStore": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN (or the ID suffix of the ARN) of the event data store that you want to update.

              ", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.cloudtrail#EventDataStoreName", + "traits": { + "smithy.api#documentation": "

              The event data store name.

              " + } + }, + "AdvancedEventSelectors": { + "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", + "traits": { + "smithy.api#documentation": "

              The advanced event selectors used to select events for the event data store.

              " + } + }, + "MultiRegionEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Specifies whether an event data store collects events from all regions, or only from the region in which it was created.

              " + } + }, + "OrganizationEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Specifies whether an event data store collects events logged for an organization in Organizations.

              " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.cloudtrail#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

              The retention period, in days.

              " + } + }, + "TerminationProtectionEnabled": { + "target": "com.amazonaws.cloudtrail#TerminationProtectionEnabled", + "traits": { + "smithy.api#documentation": "

              Indicates that termination protection is enabled and the event data store cannot be automatically deleted.

              " + } + } + } + }, + "com.amazonaws.cloudtrail#UpdateEventDataStoreResponse": { + "type": "structure", + "members": { + "EventDataStoreArn": { + "target": "com.amazonaws.cloudtrail#EventDataStoreArn", + "traits": { + "smithy.api#documentation": "

              The ARN of the event data store.

              " + } + }, + "Name": { + "target": "com.amazonaws.cloudtrail#EventDataStoreName", + "traits": { + "smithy.api#documentation": "

              The name of the event data store.

              " + } + }, + "Status": { + "target": "com.amazonaws.cloudtrail#EventDataStoreStatus", + "traits": { + "smithy.api#documentation": "

              The status of an event data store. Values can be ENABLED and PENDING_DELETION.

              " + } + }, + "AdvancedEventSelectors": { + "target": "com.amazonaws.cloudtrail#AdvancedEventSelectors", + "traits": { + "smithy.api#documentation": "

              The advanced event selectors that are applied to the event data store.

              " + } + }, + "MultiRegionEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether the event data store includes events from all regions, or only from the region in which it was created.

              " + } + }, + "OrganizationEnabled": { + "target": "com.amazonaws.cloudtrail#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether an event data store is collecting logged events for an organization in Organizations.

              " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.cloudtrail#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

              The retention period, in days.

              " + } + }, + "TerminationProtectionEnabled": { + "target": "com.amazonaws.cloudtrail#TerminationProtectionEnabled", + "traits": { + "smithy.api#documentation": "

              Indicates whether termination protection is enabled for the event data store.

              " + } + }, + "CreatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp that shows when an event data store was first created.

              " + } + }, + "UpdatedTimestamp": { + "target": "com.amazonaws.cloudtrail#Date", + "traits": { + "smithy.api#documentation": "

              The timestamp that shows when the event data store was last updated. UpdatedTimestamp is always either the same or newer than the time shown in CreatedTimestamp.

              " + } + } + } + }, "com.amazonaws.cloudtrail#UpdateTrail": { "type": "operation", "input": { diff --git a/codegen/sdk-codegen/aws-models/detective.json b/codegen/sdk-codegen/aws-models/detective.json index 0f703b4e84a9..9632afef46e4 100644 --- a/codegen/sdk-codegen/aws-models/detective.json +++ b/codegen/sdk-codegen/aws-models/detective.json @@ -75,20 +75,20 @@ "AccountId": { "target": "com.amazonaws.detective#AccountId", "traits": { - "smithy.api#documentation": "

              The account identifier of the AWS account.

              ", + "smithy.api#documentation": "

              The account identifier of the Amazon Web Services account.

              ", "smithy.api#required": {} } }, "EmailAddress": { "target": "com.amazonaws.detective#EmailAddress", "traits": { - "smithy.api#documentation": "

              The AWS account root user email address for the AWS account.

              ", + "smithy.api#documentation": "

              The Amazon Web Services account root user email address for the Amazon Web Services\n account.

              ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

              An AWS account that is the administrator account of or a member of a behavior\n graph.

              " + "smithy.api#documentation": "

              An Amazon Web Services account that is the administrator account of or a member of a\n behavior graph.

              " } }, "com.amazonaws.detective#AccountId": { @@ -125,8 +125,55 @@ } } }, + "com.amazonaws.detective#Administrator": { + "type": "structure", + "members": { + "AccountId": { + "target": "com.amazonaws.detective#AccountId", + "traits": { + "smithy.api#documentation": "

              The Amazon Web Services account identifier of the Detective administrator\n account for the organization.

              " + } + }, + "GraphArn": { + "target": "com.amazonaws.detective#GraphArn", + "traits": { + "smithy.api#documentation": "

              The ARN of the organization behavior graph.

              " + } + }, + "DelegationTime": { + "target": "com.amazonaws.detective#Timestamp", + "traits": { + "smithy.api#documentation": "

              The date and time when the Detective administrator account was enabled. The\n value is an ISO8601 formatted string. For example,\n 2021-08-18T16:35:56.284Z.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              Information about the Detective administrator account for an\n organization.

              " + } + }, + "com.amazonaws.detective#AdministratorList": { + "type": "list", + "member": { + "target": "com.amazonaws.detective#Administrator" + } + }, "com.amazonaws.detective#AmazonDetective": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Detective", + "arnNamespace": "detective", + "cloudFormationName": "Detective", + "cloudTrailEventSource": "detective.amazonaws.com", + "endpointPrefix": "api.detective" + }, + "aws.auth#sigv4": { + "name": "detective" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

              Detective uses machine learning and purpose-built visualizations to help you to\n analyze and investigate security issues across your Amazon Web Services workloads. Detective automatically extracts time-based events such\n as login attempts, API calls, and network traffic from CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by\n Amazon GuardDuty.

              \n

              The Detective API primarily supports the creation and management of behavior\n graphs. A behavior graph contains the extracted data from a set of member accounts, and is\n created and managed by an administrator account.

              \n

              To add a member account to the behavior graph, the administrator account sends an\n invitation to the account. When the account accepts the invitation, it becomes a member\n account in the behavior graph.

              \n

              Detective is also integrated with Organizations. The organization\n management account designates the Detective administrator account for the\n organization. That account becomes the administrator account for the organization behavior\n graph. The Detective administrator account can enable any organization account as\n a member account in the organization behavior graph. The organization accounts do not\n receive invitations. The Detective administrator account can also invite other\n accounts to the organization behavior graph.

              \n

              Every behavior graph is specific to a Region. You can only use the API to manage\n behavior graphs that belong to the Region that is associated with the currently selected\n endpoint.

              \n

              The administrator account for a behavior graph can use the Detective API to do\n the following:

              • Enable and disable Detective. Enabling Detective creates a new\n behavior graph.
              • View the list of member accounts in a behavior graph.
              • Add member accounts to a behavior graph.
              • Remove member accounts from a behavior graph.
              • Apply tags to a behavior graph.

              The organization management account can use the Detective API to select the\n delegated administrator for Detective.

              \n

              The Detective administrator account for an organization can use the Detective API to do the following:

              • Perform all of the functions of an administrator account.
              • Determine whether to automatically enable new organization accounts as member\n accounts in the organization behavior graph.

              An invited member account can use the Detective API to do the following:

              • View the list of behavior graphs that they are invited to.
              • Accept an invitation to contribute to a behavior graph.
              • Decline an invitation to contribute to a behavior graph.
              • Remove their account from a behavior graph.

              All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

              \n \n

              We replaced the term \"master account\" with the term \"administrator account.\" An\n administrator account is used to centrally manage multiple accounts. In the case of\n Detective, the administrator account manages the accounts in their behavior\n graph.

              \n
              ", + "smithy.api#title": "Amazon Detective" + }, "version": "2018-10-26", "operations": [ { @@ -144,9 +191,18 @@ { "target": "com.amazonaws.detective#DeleteMembers" }, + { + "target": "com.amazonaws.detective#DescribeOrganizationConfiguration" + }, + { + "target": "com.amazonaws.detective#DisableOrganizationAdminAccount" + }, { "target": "com.amazonaws.detective#DisassociateMembership" }, + { + "target": "com.amazonaws.detective#EnableOrganizationAdminAccount" + }, { "target": "com.amazonaws.detective#GetMembers" }, @@ -159,6 +215,9 @@ { "target": "com.amazonaws.detective#ListMembers" }, + { + "target": "com.amazonaws.detective#ListOrganizationAdminAccounts" + }, { "target": "com.amazonaws.detective#ListTagsForResource" }, @@ -173,23 +232,11 @@ }, { "target": "com.amazonaws.detective#UntagResource" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "Detective", - "arnNamespace": "detective", - "cloudFormationName": "Detective", - "cloudTrailEventSource": "detective.amazonaws.com", - "endpointPrefix": "api.detective" - }, - "aws.auth#sigv4": { - "name": "detective" }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

              Detective uses machine learning and purpose-built visualizations to help you analyze and\n investigate security issues across your Amazon Web Services (AWS) workloads. Detective automatically\n extracts time-based events such as login attempts, API calls, and network traffic from\n AWS CloudTrail and Amazon Virtual Private Cloud (Amazon VPC) flow logs. It also extracts findings detected by\n Amazon GuardDuty.

              \n

              The Detective API primarily supports the creation and management of behavior graphs. A\n behavior graph contains the extracted data from a set of member accounts, and is created\n and managed by an administrator account.

              \n

              Every behavior graph is specific to a Region. You can only use the API to manage graphs\n that belong to the Region that is associated with the currently selected endpoint.

              \n

              A Detective administrator account can use the Detective API to do the following:

              • Enable and disable Detective. Enabling Detective creates a new behavior graph.
              • View the list of member accounts in a behavior graph.
              • Add member accounts to a behavior graph.
              • Remove member accounts from a behavior graph.

              A member account can use the Detective API to do the following:

              • View the list of behavior graphs that they are invited to.
              • Accept an invitation to contribute to a behavior graph.
              • Decline an invitation to contribute to a behavior graph.
              • Remove their account from a behavior graph.

              All API actions are logged as CloudTrail events. See Logging Detective API Calls with CloudTrail.

              \n \n

              We replaced the term \"master account\" with the term \"administrator account.\" An\n administrator account is used to centrally manage multiple accounts. In the case of\n Detective, the administrator account manages the accounts in their behavior graph.

              \n
              ", - "smithy.api#title": "Amazon Detective" - } + { + "target": "com.amazonaws.detective#UpdateOrganizationConfiguration" + } + ] }, "com.amazonaws.detective#Boolean": { "type": "boolean" @@ -233,7 +280,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Creates a new behavior graph for the calling account, and sets that account as the\n administrator account. This operation is called by the account that is enabling\n Detective.

              \n

              Before you try to enable Detective, make sure that your account has been enrolled in\n Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable\n Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable\n Detective, it checks whether your data volume is within the Detective quota. If it exceeds the\n quota, then you cannot enable Detective.

              \n

              The operation also enables Detective for the calling account in the currently selected\n Region. It returns the ARN of the new behavior graph.

              \n

              \n CreateGraph triggers a process to create the corresponding data tables for\n the new behavior graph.

              \n

              An account can only be the administrator account for one behavior graph within a Region.\n If the same account calls CreateGraph with the same administrator account, it\n always returns the same behavior graph ARN. It does not create a new behavior graph.

              ", + "smithy.api#documentation": "

Creates a new behavior graph for the calling account, and sets that account as the administrator account. This operation is called by the account that is enabling Detective.

Before you try to enable Detective, make sure that your account has been enrolled in Amazon GuardDuty for at least 48 hours. If you do not meet this requirement, you cannot enable Detective. If you do meet the GuardDuty prerequisite, then when you make the request to enable Detective, it checks whether your data volume is within the Detective quota. If it exceeds the quota, then you cannot enable Detective.

The operation also enables Detective for the calling account in the currently selected Region. It returns the ARN of the new behavior graph.

CreateGraph triggers a process to create the corresponding data tables for the new behavior graph.

An account can only be the administrator account for one behavior graph within a Region. If the same account calls CreateGraph with the same administrator account, it always returns the same behavior graph ARN. It does not create a new behavior graph.
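As a usage sketch of the behavior just described: the snippet below calls CreateGraph through the v3 client. It assumes the regenerated @aws-sdk/client-detective package exports CreateGraphCommand as this model implies; the Region is a placeholder.

```ts
// Illustrative only: placeholder Region; assumes @aws-sdk/client-detective
// exposes CreateGraphCommand for this model revision.
import { DetectiveClient, CreateGraphCommand } from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });

// CreateGraph needs no required input; the response carries the new graph's ARN.
const { GraphArn } = await detective.send(new CreateGraphCommand({}));
console.log("Behavior graph ARN:", GraphArn);
```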

              ", "smithy.api#http": { "method": "POST", "uri": "/graph", @@ -286,7 +333,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends a request to invite the specified AWS accounts to be member accounts in the behavior graph. This operation can only be called by the administrator account for a behavior graph.

CreateMembers verifies the accounts and then invites the verified accounts. The administrator can optionally specify to not send invitation emails to the member accounts. This would be used when the administrator manages their member accounts centrally.

The request provides the behavior graph ARN and the list of accounts to invite.

The response separates the requested accounts into two lists:

• The accounts that CreateMembers was able to start the verification for. This list includes member accounts that are being verified, that have passed verification and are to be invited, and that have failed verification.
• The accounts that CreateMembers was unable to process. This list includes accounts that were already invited to be member accounts in the behavior graph.
              ", + "smithy.api#documentation": "

CreateMembers is used to send invitations to accounts. For the organization behavior graph, the Detective administrator account uses CreateMembers to enable organization accounts as member accounts.

For invited accounts, CreateMembers sends a request to invite the specified Amazon Web Services accounts to be member accounts in the behavior graph. This operation can only be called by the administrator account for a behavior graph.

CreateMembers verifies the accounts and then invites the verified accounts. The administrator can optionally specify to not send invitation emails to the member accounts. This would be used when the administrator manages their member accounts centrally.

For organization accounts in the organization behavior graph, CreateMembers attempts to enable the accounts. The organization accounts do not receive invitations.

The request provides the behavior graph ARN and the list of accounts to invite or to enable.

The response separates the requested accounts into two lists:

• The accounts that CreateMembers was able to process. For invited accounts, includes member accounts that are being verified, that have passed verification and are to be invited, and that have failed verification. For organization accounts in the organization behavior graph, includes accounts that can be enabled and that cannot be enabled.
• The accounts that CreateMembers was unable to process. This list includes accounts that were already invited to be member accounts in the behavior graph.
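A minimal sketch of the invite/enable flow described above, assuming the regenerated @aws-sdk/client-detective exposes CreateMembersCommand; the graph ARN, account ID, and email address are placeholders.

```ts
// Illustrative only: placeholder ARN, account ID, and email address.
import { DetectiveClient, CreateMembersCommand } from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });

const { Members, UnprocessedAccounts } = await detective.send(
  new CreateMembersCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:0123456789abcdef",
    DisableEmailNotification: true,
    Accounts: [
      // Invited accounts use the root user email address; per the model text,
      // organization accounts in the organization behavior graph do not need it.
      { AccountId: "444455556666", EmailAddress: "security@example.com" },
    ],
  })
);
console.log("Processed:", Members);
console.log("Unprocessed:", UnprocessedAccounts);
```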
              ", "smithy.api#http": { "method": "POST", "uri": "/graph/members", @@ -300,7 +347,7 @@ "GraphArn": { "target": "com.amazonaws.detective#GraphArn", "traits": { - "smithy.api#documentation": "

              The ARN of the behavior graph to invite the member accounts to contribute their data\n to.

              ", + "smithy.api#documentation": "

              The ARN of the behavior graph.

              ", "smithy.api#required": {} } }, @@ -313,13 +360,13 @@ "DisableEmailNotification": { "target": "com.amazonaws.detective#Boolean", "traits": { - "smithy.api#documentation": "

              if set to true, then the member accounts do not receive email\n notifications. By default, this is set to false, and the member accounts\n receive email notifications.

              " + "smithy.api#documentation": "

If set to true, then the invited accounts do not receive email notifications. By default, this is set to false, and the invited accounts receive email notifications.

Organization accounts in the organization behavior graph do not receive email notifications.

              " } }, "Accounts": { "target": "com.amazonaws.detective#AccountList", "traits": { - "smithy.api#documentation": "

              The list of AWS accounts to invite to become member accounts in the behavior graph.\n You can invite up to 50 accounts at a time. For each invited account, the account list\n contains the account identifier and the AWS account root user email address.

              ", + "smithy.api#documentation": "

              The list of Amazon Web Services accounts to invite or to enable. You can invite or enable\n up to 50 accounts at a time. For each invited account, the account list contains the\n account identifier and the Amazon Web Services account root user email address. For\n organization accounts in the organization behavior graph, the email address is not\n required.

              ", "smithy.api#required": {} } } @@ -331,13 +378,13 @@ "Members": { "target": "com.amazonaws.detective#MemberDetailList", "traits": { - "smithy.api#documentation": "

              The set of member account invitation requests that Detective was able to process. This\n includes accounts that are being verified, that failed verification, and that passed\n verification and are being sent an invitation.

              " + "smithy.api#documentation": "

              The set of member account invitation or enablement requests that Detective was\n able to process. This includes accounts that are being verified, that failed verification,\n and that passed verification and are being sent an invitation or are being enabled.

              " } }, "UnprocessedAccounts": { "target": "com.amazonaws.detective#UnprocessedAccountList", "traits": { - "smithy.api#documentation": "

              The list of accounts for which Detective was unable to process the invitation request. For\n each account, the list provides the reason why the request could not be processed. The list\n includes accounts that are already member accounts in the behavior graph.

              " + "smithy.api#documentation": "

              The list of accounts for which Detective was unable to process the invitation\n or enablement request. For each account, the list provides the reason why the request could\n not be processed. The list includes accounts that are already member accounts in the\n behavior graph.

              " } } } @@ -359,7 +406,7 @@ } ], "traits": { - "smithy.api#documentation": "

Disables the specified behavior graph and queues it to be deleted. This operation removes the graph from each member account's list of behavior graphs.

DeleteGraph can only be called by the administrator account for a behavior graph.

              ", + "smithy.api#documentation": "

Disables the specified behavior graph and queues it to be deleted. This operation removes the behavior graph from each member account's list of behavior graphs.

DeleteGraph can only be called by the administrator account for a behavior graph.

              ", "smithy.api#http": { "method": "POST", "uri": "/graph/removal", @@ -402,7 +449,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Deletes one or more member accounts from the administrator account's behavior graph.\n This operation can only be called by a Detective administrator account. That account cannot use\n DeleteMembers to delete their own account from the behavior graph. To\n disable a behavior graph, the administrator account uses the DeleteGraph API\n method.

              ", + "smithy.api#documentation": "

Removes the specified member accounts from the behavior graph. The removed accounts no longer contribute data to the behavior graph. This operation can only be called by the administrator account for the behavior graph.

For invited accounts, the removed accounts are deleted from the list of accounts in the behavior graph. To restore the account, the administrator account must send another invitation.

For organization accounts in the organization behavior graph, the Detective administrator account can always enable the organization account again. Organization accounts that are not enabled as member accounts are not included in the ListMembers results for the organization behavior graph.

An administrator account cannot use DeleteMembers to remove their own account from the behavior graph. To disable a behavior graph, the administrator account uses the DeleteGraph API method.
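A short sketch of removing member accounts, assuming DeleteMembersCommand is exported by the regenerated client; the graph ARN and account ID are placeholders.

```ts
// Illustrative only: placeholder graph ARN and member account ID.
import { DetectiveClient, DeleteMembersCommand } from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });

const { AccountIds, UnprocessedAccounts } = await detective.send(
  new DeleteMembersCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:0123456789abcdef",
    AccountIds: ["444455556666"],
  })
);
console.log("Removed:", AccountIds, "Unprocessed:", UnprocessedAccounts);
```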

              ", "smithy.api#http": { "method": "POST", "uri": "/graph/members/removal", @@ -416,14 +463,14 @@ "GraphArn": { "target": "com.amazonaws.detective#GraphArn", "traits": { - "smithy.api#documentation": "

              The ARN of the behavior graph to delete members from.

              ", + "smithy.api#documentation": "

              The ARN of the behavior graph to remove members from.

              ", "smithy.api#required": {} } }, "AccountIds": { "target": "com.amazonaws.detective#AccountIdList", "traits": { - "smithy.api#documentation": "

              The list of AWS account identifiers for the member accounts to delete from the\n behavior graph. You can delete up to 50 member accounts at a time.

              ", + "smithy.api#documentation": "

              The list of Amazon Web Services account identifiers for the member accounts to remove\n from the behavior graph. You can remove up to 50 member accounts at a time.

              ", "smithy.api#required": {} } } @@ -435,17 +482,90 @@ "AccountIds": { "target": "com.amazonaws.detective#AccountIdList", "traits": { - "smithy.api#documentation": "

              The list of AWS account identifiers for the member accounts that Detective successfully\n deleted from the behavior graph.

              " + "smithy.api#documentation": "

              The list of Amazon Web Services account identifiers for the member accounts that Detective successfully removed from the behavior graph.

              " } }, "UnprocessedAccounts": { "target": "com.amazonaws.detective#UnprocessedAccountList", "traits": { - "smithy.api#documentation": "

              The list of member accounts that Detective was not able to delete from the behavior graph.\n For each member account, provides the reason that the deletion could not be\n processed.

              " + "smithy.api#documentation": "

              The list of member accounts that Detective was not able to remove from the\n behavior graph. For each member account, provides the reason that the deletion could not be\n processed.

              " + } + } + } + }, + "com.amazonaws.detective#DescribeOrganizationConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.detective#DescribeOrganizationConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.detective#DescribeOrganizationConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.detective#InternalServerException" + }, + { + "target": "com.amazonaws.detective#TooManyRequestsException" + }, + { + "target": "com.amazonaws.detective#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about the configuration for the organization behavior graph. Currently indicates whether to automatically enable new organization accounts as member accounts.

Can only be called by the Detective administrator account for the organization.
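A minimal sketch of reading the organization configuration, assuming DescribeOrganizationConfigurationCommand is exported as this new shape implies; the organization graph ARN is a placeholder.

```ts
// Illustrative only: placeholder organization behavior graph ARN.
import {
  DetectiveClient,
  DescribeOrganizationConfigurationCommand,
} from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });

const { AutoEnable } = await detective.send(
  new DescribeOrganizationConfigurationCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:0123456789abcdef",
  })
);
console.log("New organization accounts auto-enabled:", AutoEnable);
```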

              ", + "smithy.api#http": { + "method": "POST", + "uri": "/orgs/describeOrganizationConfiguration", + "code": 200 + } + } + }, + "com.amazonaws.detective#DescribeOrganizationConfigurationRequest": { + "type": "structure", + "members": { + "GraphArn": { + "target": "com.amazonaws.detective#GraphArn", + "traits": { + "smithy.api#documentation": "

              The ARN of the organization behavior graph.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.detective#DescribeOrganizationConfigurationResponse": { + "type": "structure", + "members": { + "AutoEnable": { + "target": "com.amazonaws.detective#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether to automatically enable new organization accounts as member accounts\n in the organization behavior graph.

              " } } } }, + "com.amazonaws.detective#DisableOrganizationAdminAccount": { + "type": "operation", + "errors": [ + { + "target": "com.amazonaws.detective#InternalServerException" + }, + { + "target": "com.amazonaws.detective#TooManyRequestsException" + }, + { + "target": "com.amazonaws.detective#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes the Detective administrator account for the organization in the current Region. Deletes the behavior graph for that account.

Can only be called by the organization management account. Before you can select a different Detective administrator account, you must remove the Detective administrator account in all Regions.
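A sketch of the corresponding call, assuming DisableOrganizationAdminAccountCommand is exported by the regenerated client and, as the model suggests, takes no input members; it must run with organization management account credentials.

```ts
// Illustrative only: run with organization management account credentials.
import {
  DetectiveClient,
  DisableOrganizationAdminAccountCommand,
} from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });

// The operation takes no input; repeat it in every Region where a
// Detective administrator account was designated.
await detective.send(new DisableOrganizationAdminAccountCommand({}));
```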

              ", + "smithy.api#http": { + "method": "POST", + "uri": "/orgs/disableAdminAccount", + "code": 200 + } + } + }, "com.amazonaws.detective#DisassociateMembership": { "type": "operation", "input": { @@ -466,7 +586,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Removes the member account from the specified behavior graph. This operation can only be\n called by a member account that has the ENABLED status.

              ", + "smithy.api#documentation": "

Removes the member account from the specified behavior graph. This operation can only be called by an invited member account that has the ENABLED status.

DisassociateMembership cannot be called by an organization account in the organization behavior graph. For the organization behavior graph, the Detective administrator account determines which organization accounts to enable or disable as member accounts.

              ", "smithy.api#http": { "method": "POST", "uri": "/membership/removal", @@ -505,6 +625,43 @@ } } }, + "com.amazonaws.detective#EnableOrganizationAdminAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.detective#EnableOrganizationAdminAccountRequest" + }, + "errors": [ + { + "target": "com.amazonaws.detective#InternalServerException" + }, + { + "target": "com.amazonaws.detective#TooManyRequestsException" + }, + { + "target": "com.amazonaws.detective#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Designates the Detective administrator account for the organization in the current Region.

If the account does not have Detective enabled, then enables Detective for that account and creates a new behavior graph.

Can only be called by the organization management account.

The Detective administrator account for an organization must be the same in all Regions. If you already designated a Detective administrator account in another Region, then you must designate the same account.
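A sketch of designating the delegated administrator, assuming EnableOrganizationAdminAccountCommand is exported as this new operation implies; the account ID is a placeholder.

```ts
// Illustrative only: placeholder delegated administrator account ID.
import {
  DetectiveClient,
  EnableOrganizationAdminAccountCommand,
} from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });

// Use the same account ID in every Region, per the note above.
await detective.send(
  new EnableOrganizationAdminAccountCommand({ AccountId: "111122223333" })
);
```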

              ", + "smithy.api#http": { + "method": "POST", + "uri": "/orgs/enableAdminAccount", + "code": 200 + } + } + }, + "com.amazonaws.detective#EnableOrganizationAdminAccountRequest": { + "type": "structure", + "members": { + "AccountId": { + "target": "com.amazonaws.detective#AccountId", + "traits": { + "smithy.api#documentation": "

              The Amazon Web Services account identifier of the account to designate as the Detective administrator account for the organization.

              ", + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.detective#ErrorMessage": { "type": "string" }, @@ -549,7 +706,7 @@ "AccountIds": { "target": "com.amazonaws.detective#AccountIdList", "traits": { - "smithy.api#documentation": "

The list of AWS account identifiers for the member account for which to return member details. You can request details for up to 50 member accounts at a time.

You cannot use GetMembers to retrieve information about member accounts that were removed from the behavior graph.

              ", + "smithy.api#documentation": "

The list of Amazon Web Services account identifiers for the member account for which to return member details. You can request details for up to 50 member accounts at a time.

You cannot use GetMembers to retrieve information about member accounts that were removed from the behavior graph.

              ", "smithy.api#required": {} } } @@ -561,7 +718,7 @@ "MemberDetails": { "target": "com.amazonaws.detective#MemberDetailList", "traits": { - "smithy.api#documentation": "

              The member account details that Detective is returning in response to the request.

              " + "smithy.api#documentation": "

              The member account details that Detective is returning in response to the\n request.

              " } }, "UnprocessedAccounts": { @@ -584,7 +741,7 @@ "CreatedTime": { "target": "com.amazonaws.detective#Timestamp", "traits": { - "smithy.api#documentation": "

              The date and time that the behavior graph was created. The value is in milliseconds\n since the epoch.

              " + "smithy.api#documentation": "

              The date and time that the behavior graph was created. The value is an ISO8601 formatted\n string. For example, 2021-08-18T16:35:56.284Z.

              " } } }, @@ -617,6 +774,21 @@ "smithy.api#httpError": 500 } }, + "com.amazonaws.detective#InvitationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "INVITATION", + "name": "INVITATION" + }, + { + "value": "ORGANIZATION", + "name": "ORGANIZATION" + } + ] + } + }, "com.amazonaws.detective#ListGraphs": { "type": "operation", "input": { @@ -698,7 +870,7 @@ } ], "traits": { - "smithy.api#documentation": "

Retrieves the list of open and accepted behavior graph invitations for the member account. This operation can only be called by a member account.

Open invitations are invitations that the member account has not responded to.

The results do not include behavior graphs for which the member account declined the invitation. The results also do not include behavior graphs that the member account resigned from or was removed from.

              ", + "smithy.api#documentation": "

Retrieves the list of open and accepted behavior graph invitations for the member account. This operation can only be called by an invited member account.

Open invitations are invitations that the member account has not responded to.

The results do not include behavior graphs for which the member account declined the invitation. The results also do not include behavior graphs that the member account resigned from or was removed from.

              ", "smithy.api#http": { "method": "POST", "uri": "/invitations/list", @@ -765,7 +937,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Retrieves the list of member accounts for a behavior graph. Does not return member\n accounts that were removed from the behavior graph.

              ", + "smithy.api#documentation": "

Retrieves the list of member accounts for a behavior graph.

For invited accounts, the results do not include member accounts that were removed from the behavior graph.

For the organization behavior graph, the results do not include organization accounts that the Detective administrator account has not enabled as member accounts.
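A paginated sketch of listing member accounts, assuming ListMembersCommand and the new InvitationType member detail are exported as the model implies; the graph ARN is a placeholder.

```ts
// Illustrative only: placeholder graph ARN; pages through all member accounts.
import { DetectiveClient, ListMembersCommand } from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });
const GraphArn = "arn:aws:detective:us-east-1:111122223333:graph:0123456789abcdef";

let NextToken: string | undefined;
do {
  const page = await detective.send(new ListMembersCommand({ GraphArn, NextToken }));
  for (const member of page.MemberDetails ?? []) {
    console.log(member.AccountId, member.Status, member.InvitationType);
  }
  NextToken = page.NextToken;
} while (NextToken);
```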

              ", "smithy.api#http": { "method": "POST", "uri": "/graph/members/list", @@ -808,13 +980,80 @@ "MemberDetails": { "target": "com.amazonaws.detective#MemberDetailList", "traits": { - "smithy.api#documentation": "

The list of member accounts in the behavior graph.

The results include member accounts that did not pass verification and member accounts that have not yet accepted the invitation to the behavior graph. The results do not include member accounts that were removed from the behavior graph.

              " + "smithy.api#documentation": "

The list of member accounts in the behavior graph.

For invited accounts, the results include member accounts that did not pass verification and member accounts that have not yet accepted the invitation to the behavior graph. The results do not include member accounts that were removed from the behavior graph.

For the organization behavior graph, the results do not include organization accounts that the Detective administrator account has not enabled as member accounts.

              " + } + }, + "NextToken": { + "target": "com.amazonaws.detective#PaginationToken", + "traits": { + "smithy.api#documentation": "

              If there are more member accounts remaining in the results, then use this pagination\n token to request the next page of member accounts.

              " + } + } + } + }, + "com.amazonaws.detective#ListOrganizationAdminAccounts": { + "type": "operation", + "input": { + "target": "com.amazonaws.detective#ListOrganizationAdminAccountsRequest" + }, + "output": { + "target": "com.amazonaws.detective#ListOrganizationAdminAccountsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.detective#InternalServerException" + }, + { + "target": "com.amazonaws.detective#TooManyRequestsException" + }, + { + "target": "com.amazonaws.detective#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns information about the Detective administrator account for an organization. Can only be called by the organization management account.
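A sketch of listing the delegated administrator, assuming ListOrganizationAdminAccountsCommand is exported as this new operation implies.

```ts
// Illustrative only: run with organization management account credentials.
import {
  DetectiveClient,
  ListOrganizationAdminAccountsCommand,
} from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });

const { Administrators } = await detective.send(
  new ListOrganizationAdminAccountsCommand({})
);
for (const admin of Administrators ?? []) {
  console.log(admin);
}
```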

              ", + "smithy.api#http": { + "method": "POST", + "uri": "/orgs/adminAccountslist", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.detective#ListOrganizationAdminAccountsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.detective#PaginationToken", + "traits": { + "smithy.api#documentation": "

              For requests to get the next page of results, the pagination token that was returned\n with the previous set of results. The initial request does not include a pagination\n token.

              " + } + }, + "MaxResults": { + "target": "com.amazonaws.detective#MemberResultsLimit", + "traits": { + "smithy.api#documentation": "

              The maximum number of results to return.

              " + } + } + } + }, + "com.amazonaws.detective#ListOrganizationAdminAccountsResponse": { + "type": "structure", + "members": { + "Administrators": { + "target": "com.amazonaws.detective#AdministratorList", + "traits": { + "smithy.api#documentation": "

              The list of delegated administrator accounts.

              " } }, "NextToken": { "target": "com.amazonaws.detective#PaginationToken", "traits": { - "smithy.api#documentation": "

              If there are more member accounts remaining in the results, then this is the pagination\n token to use to request the next page of member accounts.

              " + "smithy.api#documentation": "

              If there are more accounts remaining in the results, then this is the pagination token\n to use to request the next page of accounts.

              " } } } @@ -877,19 +1116,19 @@ "AccountId": { "target": "com.amazonaws.detective#AccountId", "traits": { - "smithy.api#documentation": "

              The AWS account identifier for the member account.

              " + "smithy.api#documentation": "

              The Amazon Web Services account identifier for the member account.

              " } }, "EmailAddress": { "target": "com.amazonaws.detective#EmailAddress", "traits": { - "smithy.api#documentation": "

              The AWS account root user email address for the member account.

              " + "smithy.api#documentation": "

              The Amazon Web Services account root user email address for the member account.

              " } }, "GraphArn": { "target": "com.amazonaws.detective#GraphArn", "traits": { - "smithy.api#documentation": "

              The ARN of the behavior graph that the member account was invited to.

              " + "smithy.api#documentation": "

              The ARN of the behavior graph.

              " } }, "MasterId": { @@ -898,37 +1137,37 @@ "smithy.api#deprecated": { "message": "This property is deprecated. Use AdministratorId instead." }, - "smithy.api#documentation": "

              The AWS account identifier of the administrator account for the behavior graph.

              " + "smithy.api#documentation": "

              The Amazon Web Services account identifier of the administrator account for the behavior\n graph.

              " } }, "AdministratorId": { "target": "com.amazonaws.detective#AccountId", "traits": { - "smithy.api#documentation": "

              The AWS account identifier of the administrator account for the behavior graph.

              " + "smithy.api#documentation": "

              The Amazon Web Services account identifier of the administrator account for the behavior\n graph.

              " } }, "Status": { "target": "com.amazonaws.detective#MemberStatus", "traits": { - "smithy.api#documentation": "

The current membership status of the member account. The status can have one of the following values:

• INVITED - Indicates that the member was sent an invitation but has not yet responded.
• VERIFICATION_IN_PROGRESS - Indicates that Detective is verifying that the account identifier and email address provided for the member account match. If they do match, then Detective sends the invitation. If the email address and account identifier don't match, then the member cannot be added to the behavior graph.
• VERIFICATION_FAILED - Indicates that the account and email address provided for the member account do not match, and Detective did not send an invitation to the account.
• ENABLED - Indicates that the member account accepted the invitation to contribute to the behavior graph.
• ACCEPTED_BUT_DISABLED - Indicates that the member account accepted the invitation but is prevented from contributing data to the behavior graph. DisabledReason provides the reason why the member account is not enabled.

Member accounts that declined an invitation or that were removed from the behavior graph are not included.

              " + "smithy.api#documentation": "

The current membership status of the member account. The status can have one of the following values:

• INVITED - For invited accounts only. Indicates that the member was sent an invitation but has not yet responded.
• VERIFICATION_IN_PROGRESS - For invited accounts only, indicates that Detective is verifying that the account identifier and email address provided for the member account match. If they do match, then Detective sends the invitation. If the email address and account identifier don't match, then the member cannot be added to the behavior graph. For organization accounts in the organization behavior graph, indicates that Detective is verifying that the account belongs to the organization.
• VERIFICATION_FAILED - For invited accounts only. Indicates that the account and email address provided for the member account do not match, and Detective did not send an invitation to the account.
• ENABLED - Indicates that the member account currently contributes data to the behavior graph. For invited accounts, the member account accepted the invitation. For organization accounts in the organization behavior graph, the Detective administrator account enabled the organization account as a member account.
• ACCEPTED_BUT_DISABLED - The account accepted the invitation, or was enabled by the Detective administrator account, but is prevented from contributing data to the behavior graph. DisabledReason provides the reason why the member account is not enabled.

Invited accounts that declined an invitation or that were removed from the behavior graph are not included. In the organization behavior graph, organization accounts that the Detective administrator account did not enable are not included.

              " } }, "DisabledReason": { "target": "com.amazonaws.detective#MemberDisabledReason", "traits": { - "smithy.api#documentation": "

For member accounts with a status of ACCEPTED_BUT_DISABLED, the reason that the member account is not enabled.

The reason can have one of the following values:

• VOLUME_TOO_HIGH - Indicates that adding the member account would cause the data volume for the behavior graph to be too high.
• VOLUME_UNKNOWN - Indicates that Detective is unable to verify the data volume for the member account. This is usually because the member account is not enrolled in Amazon GuardDuty.
              " + "smithy.api#documentation": "

For member accounts with a status of ACCEPTED_BUT_DISABLED, the reason that the member account is not enabled.

The reason can have one of the following values:

• VOLUME_TOO_HIGH - Indicates that adding the member account would cause the data volume for the behavior graph to be too high.
• VOLUME_UNKNOWN - Indicates that Detective is unable to verify the data volume for the member account. This is usually because the member account is not enrolled in Amazon GuardDuty.
              " } }, "InvitedTime": { "target": "com.amazonaws.detective#Timestamp", "traits": { - "smithy.api#documentation": "

              The date and time that Detective sent the invitation to the member account. The value is in\n milliseconds since the epoch.

              " + "smithy.api#documentation": "

              For invited accounts, the date and time that Detective sent the invitation to\n the account. The value is an ISO8601 formatted string. For example,\n 2021-08-18T16:35:56.284Z.

              " } }, "UpdatedTime": { "target": "com.amazonaws.detective#Timestamp", "traits": { - "smithy.api#documentation": "

              The date and time that the member account was last updated. The value is in milliseconds\n since the epoch.

              " + "smithy.api#documentation": "

              The date and time that the member account was last updated. The value is an ISO8601\n formatted string. For example, 2021-08-18T16:35:56.284Z.

              " } }, "VolumeUsageInBytes": { @@ -940,7 +1179,7 @@ "VolumeUsageUpdatedTime": { "target": "com.amazonaws.detective#Timestamp", "traits": { - "smithy.api#documentation": "

              The data and time when the member account data volume was last updated.

              " + "smithy.api#documentation": "

The date and time when the member account data volume was last updated. The value is an ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

              " } }, "PercentOfGraphUtilization": { @@ -958,12 +1197,18 @@ "smithy.api#deprecated": { "message": "This property is deprecated. Use VolumeUsageUpdatedTime instead." }, - "smithy.api#documentation": "

              The date and time when the graph utilization percentage was last updated.

              " + "smithy.api#documentation": "

              The date and time when the graph utilization percentage was last updated. The value is\n an ISO8601 formatted string. For example, 2021-08-18T16:35:56.284Z.

              " + } + }, + "InvitationType": { + "target": "com.amazonaws.detective#InvitationType", + "traits": { + "smithy.api#documentation": "

The type of behavior graph membership.

For an organization account in the organization behavior graph, the type is ORGANIZATION.

For an account that was invited to a behavior graph, the type is INVITATION.

              " } } }, "traits": { - "smithy.api#documentation": "

              Details about a member account that was invited to contribute to a behavior\n graph.

              " + "smithy.api#documentation": "

              Details about a member account in a behavior graph.

              " } }, "com.amazonaws.detective#MemberDetailList": { @@ -1059,7 +1304,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Rejects an invitation to contribute the account data to a behavior graph. This operation\n must be called by a member account that has the INVITED status.

              ", + "smithy.api#documentation": "

Rejects an invitation to contribute the account data to a behavior graph. This operation must be called by an invited member account that has the INVITED status.

RejectInvitation cannot be called by an organization account in the organization behavior graph. In the organization behavior graph, organization accounts do not receive an invitation.

              ", "smithy.api#http": { "method": "POST", "uri": "/invitation/removal", @@ -1100,7 +1345,7 @@ } }, "traits": { - "smithy.api#documentation": "

This request cannot be completed for one of the following reasons.

• The request would cause the number of member accounts in the behavior graph to exceed the maximum allowed. A behavior graph cannot have more than 1000 member accounts.
• The request would cause the data rate for the behavior graph to exceed the maximum allowed.
• Detective is unable to verify the data rate for the member account. This is usually because the member account is not enrolled in Amazon GuardDuty.
              ", + "smithy.api#documentation": "

This request cannot be completed for one of the following reasons.

• The request would cause the number of member accounts in the behavior graph to exceed the maximum allowed. A behavior graph cannot have more than 1200 member accounts.
• The request would cause the data rate for the behavior graph to exceed the maximum allowed.
• Detective is unable to verify the data rate for the member account. This is usually because the member account is not enrolled in Amazon GuardDuty.
              ", "smithy.api#error": "client", "smithy.api#httpError": 402 } @@ -1128,7 +1373,7 @@ } ], "traits": { - "smithy.api#documentation": "

Sends a request to enable data ingest for a member account that has a status of ACCEPTED_BUT_DISABLED.

For valid member accounts, the status is updated as follows.

• If Detective enabled the member account, then the new status is ENABLED.
• If Detective cannot enable the member account, the status remains ACCEPTED_BUT_DISABLED.
              ", + "smithy.api#documentation": "

Sends a request to enable data ingest for a member account that has a status of ACCEPTED_BUT_DISABLED.

For valid member accounts, the status is updated as follows.

• If Detective enabled the member account, then the new status is ENABLED.
• If Detective cannot enable the member account, the status remains ACCEPTED_BUT_DISABLED.
              ", "smithy.api#http": { "method": "POST", "uri": "/graph/member/monitoringstate", @@ -1259,13 +1504,26 @@ "smithy.api#timestampFormat": "date-time" } }, + "com.amazonaws.detective#TooManyRequestsException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.detective#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

              The request cannot be completed because too many other requests are occurring at the\n same time.

              ", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, "com.amazonaws.detective#UnprocessedAccount": { "type": "structure", "members": { "AccountId": { "target": "com.amazonaws.detective#AccountId", "traits": { - "smithy.api#documentation": "

              The AWS account identifier of the member account that was not processed.

              " + "smithy.api#documentation": "

              The Amazon Web Services account identifier of the member account that was not\n processed.

              " } }, "Reason": { @@ -1341,6 +1599,49 @@ "type": "structure", "members": {} }, + "com.amazonaws.detective#UpdateOrganizationConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.detective#UpdateOrganizationConfigurationRequest" + }, + "errors": [ + { + "target": "com.amazonaws.detective#InternalServerException" + }, + { + "target": "com.amazonaws.detective#TooManyRequestsException" + }, + { + "target": "com.amazonaws.detective#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the configuration for the Organizations integration in the current Region. Can only be called by the Detective administrator account for the organization.
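A sketch of toggling auto-enablement, assuming UpdateOrganizationConfigurationCommand is exported as this new operation implies; the organization graph ARN is a placeholder.

```ts
// Illustrative only: placeholder organization behavior graph ARN.
import {
  DetectiveClient,
  UpdateOrganizationConfigurationCommand,
} from "@aws-sdk/client-detective";

const detective = new DetectiveClient({ region: "us-east-1" });

// Turn on auto-enrollment of new organization accounts as member accounts.
await detective.send(
  new UpdateOrganizationConfigurationCommand({
    GraphArn: "arn:aws:detective:us-east-1:111122223333:graph:0123456789abcdef",
    AutoEnable: true,
  })
);
```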

              ", + "smithy.api#http": { + "method": "POST", + "uri": "/orgs/updateOrganizationConfiguration", + "code": 200 + } + } + }, + "com.amazonaws.detective#UpdateOrganizationConfigurationRequest": { + "type": "structure", + "members": { + "GraphArn": { + "target": "com.amazonaws.detective#GraphArn", + "traits": { + "smithy.api#documentation": "

              The ARN of the organization behavior graph.

              ", + "smithy.api#required": {} + } + }, + "AutoEnable": { + "target": "com.amazonaws.detective#Boolean", + "traits": { + "smithy.api#documentation": "

              Indicates whether to automatically enable new organization accounts as member accounts\n in the organization behavior graph.

              " + } + } + } + }, "com.amazonaws.detective#ValidationException": { "type": "structure", "members": { diff --git a/codegen/sdk-codegen/aws-models/ec2.json b/codegen/sdk-codegen/aws-models/ec2.json index df6c6581b2c9..582fb8a89a28 100644 --- a/codegen/sdk-codegen/aws-models/ec2.json +++ b/codegen/sdk-codegen/aws-models/ec2.json @@ -1600,7 +1600,7 @@ "name": "ec2" }, "aws.protocols#ec2Query": {}, - "smithy.api#documentation": "Amazon Elastic Compute Cloud\n

Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the AWS Cloud where you can launch AWS resources in a virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available and reliable storage volumes that can be attached to any running instance and used like a hard drive.

To learn more, see the following resources:

              \n ", + "smithy.api#documentation": "Amazon Elastic Compute Cloud\n

Amazon Elastic Compute Cloud (Amazon EC2) provides secure and resizable computing capacity in the Amazon Web Services Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster. Amazon Virtual Private Cloud (Amazon VPC) enables you to provision a logically isolated section of the Amazon Web Services Cloud where you can launch Amazon Web Services resources in a virtual network that you've defined. Amazon Elastic Block Store (Amazon EBS) provides block level storage volumes for use with EC2 instances. EBS volumes are highly available and reliable storage volumes that can be attached to any running instance and used like a hard drive.

To learn more, see the following resources:

              \n ", "smithy.api#title": "Amazon Elastic Compute Cloud", "smithy.api#xmlNamespace": { "uri": "http://ec2.amazonaws.com/doc/2016-11-15" @@ -2940,6 +2940,9 @@ { "target": "com.amazonaws.ec2#ModifyVpcEndpointServiceConfiguration" }, + { + "target": "com.amazonaws.ec2#ModifyVpcEndpointServicePayerResponsibility" + }, { "target": "com.amazonaws.ec2#ModifyVpcEndpointServicePermissions" }, @@ -7229,6 +7232,14 @@ "smithy.api#documentation": "

              The ID of the Capacity Reservation Fleet to which the Capacity Reservation belongs. \n\t\t\tOnly valid for Capacity Reservations that were created by a Capacity Reservation Fleet.

              ", "smithy.api#xmlName": "capacityReservationFleetId" } + }, + "PlacementGroupArn": { + "target": "com.amazonaws.ec2#PlacementGroupArn", + "traits": { + "aws.protocols#ec2QueryName": "PlacementGroupArn", + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the cluster placement group in which the Capacity Reservation was created. For more information, see Capacity Reservations for cluster placement groups in the Amazon EC2 User Guide.

              ", + "smithy.api#xmlName": "placementGroupArn" + } } }, "traits": { @@ -10019,6 +10030,12 @@ "traits": { "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the Outpost on which to create the Capacity Reservation.

              " } + }, + "PlacementGroupArn": { + "target": "com.amazonaws.ec2#PlacementGroupArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the cluster placement group in which to create the Capacity Reservation. For more information, see Capacity Reservations for cluster placement groups in the Amazon EC2 User Guide.
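A sketch of using the new PlacementGroupArn parameter with @aws-sdk/client-ec2; the instance settings and placement group ARN are placeholders.

```ts
// Illustrative only: placeholder instance settings and placement group ARN.
import { EC2Client, CreateCapacityReservationCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({ region: "us-east-1" });

const { CapacityReservation } = await ec2.send(
  new CreateCapacityReservationCommand({
    InstanceType: "m5.large",
    InstancePlatform: "Linux/UNIX",
    AvailabilityZone: "us-east-1a",
    InstanceCount: 2,
    // New in this model update: create the reservation inside a cluster placement group.
    PlacementGroupArn:
      "arn:aws:ec2:us-east-1:111122223333:placement-group/my-cluster-group",
  })
);
console.log(
  CapacityReservation?.CapacityReservationId,
  CapacityReservation?.PlacementGroupArn
);
```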

              " + } } } }, @@ -10773,7 +10790,7 @@ "Type": { "target": "com.amazonaws.ec2#FleetType", "traits": { - "smithy.api#documentation": "

The fleet type. The default value is maintain.

• maintain - The EC2 Fleet places an asynchronous request for your desired capacity, and continues to maintain your desired Spot capacity by replenishing interrupted Spot Instances.
• request - The EC2 Fleet places an asynchronous one-time request for your desired capacity, but does submit Spot requests in alternative capacity pools if Spot capacity is unavailable, and does not maintain Spot capacity if Spot Instances are interrupted.
• instant - The EC2 Fleet places a synchronous one-time request for your desired capacity, and returns errors for any instances that could not be launched.

For more information, see EC2 Fleet request types in the Amazon EC2 User Guide.

              " + "smithy.api#documentation": "

The fleet type. The default value is maintain.

• maintain - The EC2 Fleet places an asynchronous request for your desired capacity, and continues to maintain your desired Spot capacity by replenishing interrupted Spot Instances.
• request - The EC2 Fleet places an asynchronous one-time request for your desired capacity, but does submit Spot requests in alternative capacity pools if Spot capacity is unavailable, and does not maintain Spot capacity if Spot Instances are interrupted.
• instant - The EC2 Fleet places a synchronous one-time request for your desired capacity, and returns errors for any instances that could not be launched.

For more information, see EC2 Fleet request types in the Amazon EC2 User Guide.

              " } }, "ValidFrom": { @@ -12527,7 +12544,7 @@ "InterfaceType": { "target": "com.amazonaws.ec2#NetworkInterfaceCreationType", "traits": { - "smithy.api#documentation": "

              Indicates the type of network interface. To create an Elastic Fabric Adapter (EFA), specify \n\t\t\tefa. For more information, see \n\t\t\t Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide. To create a trunk network interface, specify \n\t\t efa. For more information, see \n\t\t Network interface trunking in the Amazon Elastic Compute Cloud User Guide.

              " + "smithy.api#documentation": "

Indicates the type of network interface. To create an Elastic Fabric Adapter (EFA), specify efa. For more information, see Elastic Fabric Adapter in the Amazon Elastic Compute Cloud User Guide. To create a trunk network interface, specify trunk.
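A sketch of the corrected trunk value, assuming CreateNetworkInterfaceCommand accepts "trunk" for InterfaceType as the updated text states; the subnet ID is a placeholder.

```ts
// Illustrative only: placeholder subnet ID.
import { EC2Client, CreateNetworkInterfaceCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({ region: "us-east-1" });

const { NetworkInterface } = await ec2.send(
  new CreateNetworkInterfaceCommand({
    SubnetId: "subnet-0123456789abcdef0",
    // "trunk" per the corrected documentation above; use "efa" for an Elastic Fabric Adapter.
    InterfaceType: "trunk",
    Description: "Trunk interface for branch network interfaces",
  })
);
console.log(NetworkInterface?.NetworkInterfaceId);
```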

              " } }, "SubnetId": { @@ -18862,7 +18879,7 @@ "target": "com.amazonaws.ec2#DescribeAccountAttributesResult" }, "traits": { - "smithy.api#documentation": "

Describes attributes of your AWS account. The following are the supported account attributes:

• supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.
• default-vpc: The ID of the default VPC for your account, or none.
• max-instances: This attribute is no longer supported. The returned value does not reflect your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic Compute Cloud User Guide.
• vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.
• max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.
• vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.
              " + "smithy.api#documentation": "

Describes attributes of your Amazon Web Services account. The following are the supported account attributes:

• supported-platforms: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.
• default-vpc: The ID of the default VPC for your account, or none.
• max-instances: This attribute is no longer supported. The returned value does not reflect your actual vCPU limit for running On-Demand Instances. For more information, see On-Demand Instance Limits in the Amazon Elastic Compute Cloud User Guide.
• vpc-max-security-groups-per-interface: The maximum number of security groups that you can assign to a network interface.
• max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.
• vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.
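A short sketch of querying two of the attributes listed above with @aws-sdk/client-ec2; the Region is a placeholder.

```ts
// Illustrative only: placeholder Region.
import { EC2Client, DescribeAccountAttributesCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({ region: "us-east-1" });

const { AccountAttributes } = await ec2.send(
  new DescribeAccountAttributesCommand({
    AttributeNames: ["supported-platforms", "default-vpc"],
  })
);
for (const attribute of AccountAttributes ?? []) {
  const values = attribute.AttributeValues?.map((v) => v.AttributeValue);
  console.log(attribute.AttributeName, values);
}
```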
              " } }, "com.amazonaws.ec2#DescribeAccountAttributesRequest": { @@ -19430,7 +19447,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

One or more filters.

• instance-type - The type of instance for which the Capacity Reservation reserves capacity.
• owner-id - The ID of the Amazon Web Services account that owns the Capacity Reservation.
• availability-zone-id - The Availability Zone ID of the Capacity Reservation.
• instance-platform - The type of operating system for which the Capacity Reservation reserves capacity.
• availability-zone - The Availability Zone ID of the Capacity Reservation.
• tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:
  • default - The Capacity Reservation is created on hardware that is shared with other Amazon Web Services accounts.
  • dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single Amazon Web Services account.
• outpost-arn - The Amazon Resource Name (ARN) of the Outpost on which the Capacity Reservation was created.
• state - The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:
  • active - The Capacity Reservation is active and the capacity is available for your use.
  • expired - The Capacity Reservation expired automatically at the date and time specified in your request. The reserved capacity is no longer available for your use.
  • cancelled - The Capacity Reservation was cancelled. The reserved capacity is no longer available for your use.
  • pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.
  • failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, capacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.
• start-date - The date and time at which the Capacity Reservation was started.
• end-date - The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. The Capacity Reservation's state changes to expired when it reaches its end date and time.
• end-date-type - Indicates the way in which the Capacity Reservation ends. A Capacity Reservation can have one of the following end types:
  • unlimited - The Capacity Reservation remains active until you explicitly cancel it.
  • limited - The Capacity Reservation expires automatically at a specified date and time.
• instance-match-criteria - Indicates the type of instance launches that the Capacity Reservation accepts. The options include:
  • open - The Capacity Reservation accepts all instances that have matching attributes (instance type, platform, and Availability Zone). Instances that have matching attributes launch into the Capacity Reservation automatically without specifying any additional parameters.
  • targeted - The Capacity Reservation only accepts instances that have matching attributes (instance type, platform, and Availability Zone), and explicitly target the Capacity Reservation. This ensures that only permitted instances can use the reserved capacity.
              ", + "smithy.api#documentation": "

              One or more filters.

              \n\t \t
                \n
              • \n

                \n instance-type - The type of instance for which the Capacity Reservation reserves capacity.

                \n
              • \n
              • \n

                \n owner-id - The ID of the Amazon Web Services account that owns the Capacity Reservation.

                \n
              • \n
              • \n

                \n instance-platform - The type of operating system for which the Capacity Reservation reserves capacity.

                \n
              • \n
              • \n

                \n availability-zone - The Availability Zone of the Capacity Reservation.

                \n
              • \n
              • \n

                \n tenancy - Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the \n\t \t\t\tfollowing tenancy settings:

                \n\t \t\t\t
                  \n
                • \n

                  \n default - The Capacity Reservation is created on hardware that is shared with other Amazon Web Services accounts.

                  \n
                • \n
                • \n

                  \n dedicated - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single Amazon Web Services account.

                  \n
                • \n
                \n\t \t\t\t
              • \n
              • \n

                \n outpost-arn - The Amazon Resource Name (ARN) of the Outpost on which the Capacity Reservation was created.

                \n
              • \n
              • \n

                \n state - The current state of the Capacity Reservation. A Capacity Reservation can be in one of the following states:

                \n\t \t\t
                  \n
                • \n

                  \n active- The Capacity Reservation is active and the capacity is available for your use.

                  \n
                • \n
                • \n

                  \n expired - The Capacity Reservation expired automatically at the date and time specified in your request. \n\t \t\t\t\tThe reserved capacity is no longer available for your use.

                  \n
                • \n
                • \n

                  \n cancelled - The Capacity Reservation was cancelled. The reserved capacity is no longer available for your use.

                  \n
                • \n
                • \n

                  \n pending - The Capacity Reservation request was successful but the capacity provisioning is still pending.

                  \n
                • \n
                • \n

                  \n failed - The Capacity Reservation request has failed. A request might fail due to invalid request parameters, \n\t \t\t\t\tcapacity constraints, or instance limit constraints. Failed requests are retained for 60 minutes.

                  \n\t \t\t
                • \n
                \n\t \t\t
              • \n
              • \n

                \n start-date - The date and time at which the Capacity Reservation was started.

                \n
              • \n
              • \n

                \n end-date - The date and time at which the Capacity Reservation expires. When a Capacity Reservation expires, the reserved capacity is \n\t \t\t\treleased and you can no longer launch instances into it. The Capacity Reservation's state changes to expired when it reaches its end date and time.

                \n
              • \n
              • \n

                \n end-date-type - Indicates the way in which the Capacity Reservation ends. A Capacity Reservation can have one of the following end types:

                \n\t \t\t
                  \n
                • \n

                  \n unlimited - The Capacity Reservation remains active until you explicitly cancel it.

                  \n
                • \n
                • \n

                  \n limited - The Capacity Reservation expires automatically at a specified date and time.

                  \n
                • \n
                \n\t \t\t
              • \n
              • \n

                \n instance-match-criteria - Indicates the type of instance launches that the Capacity Reservation accepts. The options include:

                \n\t \t\t\t
                  \n
                • \n

                  \n open - The Capacity Reservation accepts all instances that have matching\n\t\t\t\t\t\t\tattributes (instance type, platform, and Availability Zone). Instances\n\t\t\t\t\t\t\tthat have matching attributes launch into the Capacity Reservation\n\t\t\t\t\t\t\tautomatically without specifying any additional parameters.

                  \n
                • \n
                • \n

                  \n targeted - The Capacity Reservation only accepts instances that have matching\n\t\t\t\t\t\t\tattributes (instance type, platform, and Availability Zone), and\n\t\t\t\t\t\t\texplicitly target the Capacity Reservation. This ensures that only\n\t\t\t\t\t\t\tpermitted instances can use the reserved capacity.

                  \n
                • \n
                \n\t \t\t
              • \n
              ", "smithy.api#xmlName": "Filter" } }, @@ -23564,7 +23581,7 @@ "Filters": { "target": "com.amazonaws.ec2#FilterList", "traits": { - "smithy.api#documentation": "

@@ -23564,7 +23581,7 @@
       "Filters": {
          "target": "com.amazonaws.ec2#FilterList",
          "traits": {
-            "smithy.api#documentation": "One or more filters. • create-time - The time the launch template version was created. • ebs-optimized - A boolean that indicates whether the instance is optimized for Amazon EBS I/O. • iam-instance-profile - The ARN of the IAM instance profile. • image-id - The ID of the AMI. • instance-type - The instance type. • is-default-version - A boolean that indicates whether the launch template version is the default version. • kernel-id - The kernel ID. • ram-disk-id - The RAM disk ID.",
+            "smithy.api#documentation": "One or more filters. • create-time - The time the launch template version was created. • ebs-optimized - A boolean that indicates whether the instance is optimized for Amazon EBS I/O. • http-endpoint - Indicates whether the HTTP metadata endpoint on your instances is enabled (enabled | disabled). • http-protocol-ipv4 - Indicates whether the IPv4 endpoint for the instance metadata service is enabled (enabled | disabled). • host-resource-group-arn - The ARN of the host resource group in which to launch the instances. • http-tokens - The state of token usage for your instance metadata requests (optional | required). • iam-instance-profile - The ARN of the IAM instance profile. • image-id - The ID of the AMI. • instance-type - The instance type. • is-default-version - A boolean that indicates whether the launch template version is the default version. • kernel-id - The kernel ID. • license-configuration-arn - The ARN of the license configuration. • network-card-index - The index of the network card. • ram-disk-id - The RAM disk ID.",
             "smithy.api#xmlName": "Filter"
          }
        }

@@ -25187,7 +25204,7 @@
       "Filters": {
          "target": "com.amazonaws.ec2#FilterList",
          "traits": {
-            "smithy.api#documentation": "The filters. • group-name - The name of the placement group. • state - The state of the placement group (pending | available | deleting | deleted). • strategy - The strategy of the placement group (cluster | spread | partition). • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.",
+            "smithy.api#documentation": "The filters. • group-name - The name of the placement group. • group-arn - The Amazon Resource Name (ARN) of the placement group. • state - The state of the placement group (pending | available | deleting | deleted). • strategy - The strategy of the placement group (cluster | spread | partition). • tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value. • tag-key - The key of a tag assigned to the resource. Use this filter to find all resources that have a tag with a specific key, regardless of the tag value.",
             "smithy.api#xmlName": "Filter"
          }
        },

@@ -26582,7 +26599,7 @@
       "Filters": {
          "target": "com.amazonaws.ec2#FilterList",
          "traits": {
-            "smithy.api#documentation": "The filters. • snapshot-id - The snapshot ID. • volume-id - The ID of the volume the snapshot is for. • last-tiering-operation - The state of the last archive or restore action (archiving | archival_error | archival_complete | restoring | restore_error | restore_complete).",
+            "smithy.api#documentation": "The filters. • snapshot-id - The snapshot ID. • volume-id - The ID of the volume the snapshot is for. • last-tiering-operation - The state of the last archive or restore action (archival-in-progress | archival-completed | archival-failed | permanent-restore-in-progress | permanent-restore-completed | permanent-restore-failed | temporary-restore-in-progress | temporary-restore-completed | temporary-restore-failed).",
             "smithy.api#xmlName": "Filter"
          }
        },

@@ -27379,7 +27396,7 @@
         "target": "com.amazonaws.ec2#DescribeStaleSecurityGroupsResult"
      },
      "traits": {
-        "smithy.api#documentation": "[VPC only] Describes the stale security group rules for security groups in a specified VPC. Rules are stale when they reference a deleted security group in a peer VPC, or a security group in a peer VPC for which the VPC peering connection has been deleted.",
+        "smithy.api#documentation": "[VPC only] Describes the stale security group rules for security groups in a specified VPC. Rules are stale when they reference a deleted security group in the same VPC or in a peer VPC, or if they reference a security group in a peer VPC for which the VPC peering connection has been deleted.",
        "smithy.api#paginated": {
          "inputToken": "NextToken",
          "outputToken": "NextToken",

@@ -34725,13 +34742,13 @@
       "Values": {
          "target": "com.amazonaws.ec2#ValueStringList",
          "traits": {
-            "smithy.api#documentation": "The filter values. Filter values are case-sensitive.",
+            "smithy.api#documentation": "The filter values. Filter values are case-sensitive. If you specify multiple values for a filter, the values are joined with an OR, and the request returns all results that match any of the specified values.",
             "smithy.api#xmlName": "Value"
          }
        }
      },
      "traits": {

-        "smithy.api#documentation": "A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs."
+        "smithy.api#documentation": "A filter name and value pair that is used to return a more specific list of results from a describe operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs. If you specify multiple filters, the filters are joined with an AND, and the request returns only results that match all of the specified filters."
      }
    },
    "com.amazonaws.ec2#FilterList": {

@@ -35468,7 +35485,7 @@
         "target": "com.amazonaws.ec2#Integer",
        "traits": {
          "aws.protocols#ec2QueryName": "TerminationDelay",
-          "smithy.api#documentation": "The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot Instance after launching a new replacement Spot Instance. Valid only when replacementStrategy is set to launch-before-terminate. Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds.",
+          "smithy.api#documentation": "The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot Instance after launching a new replacement Spot Instance. Required when ReplacementStrategy is set to launch-before-terminate. Not valid when ReplacementStrategy is set to launch. Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds.",
          "smithy.api#xmlName": "terminationDelay"
        }
      }

@@ -35489,12 +35506,12 @@
       "TerminationDelay": {
          "target": "com.amazonaws.ec2#Integer",
          "traits": {
-            "smithy.api#documentation": "The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot Instance after launching a new replacement Spot Instance. Valid only when ReplacementStrategy is set to launch-before-terminate. Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds."
+            "smithy.api#documentation": "The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot Instance after launching a new replacement Spot Instance. Required when ReplacementStrategy is set to launch-before-terminate. Not valid when ReplacementStrategy is set to launch. Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds."
          }
        }
      },
      "traits": {

-        "smithy.api#documentation": "The Spot Instance replacement strategy to use when Amazon EC2 emits a rebalance notification signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide."
+        "smithy.api#documentation": "The Spot Instance replacement strategy to use when Amazon EC2 emits a rebalance notification signal that your Spot Instance is at an elevated risk of being interrupted. For more information, see Capacity rebalancing in the Amazon EC2 User Guide."
      }
    },
    "com.amazonaws.ec2#FleetSpotMaintenanceStrategies": {
@@ -39140,7 +39157,7 @@
         }
      },
      "OfferingId": {
-        "target": "com.amazonaws.ec2#String",
+        "target": "com.amazonaws.ec2#OfferingId",
        "traits": {
          "aws.protocols#ec2QueryName": "OfferingId",
          "smithy.api#documentation": "

The ID of the offering.",
@@ -39284,7 +39301,7 @@
         }
      },
      "HostReservationId": {
-        "target": "com.amazonaws.ec2#String",
+        "target": "com.amazonaws.ec2#HostReservationId",
        "traits": {
          "aws.protocols#ec2QueryName": "HostReservationId",
          "smithy.api#documentation": "The ID of the reservation that specifies the associated Dedicated Hosts.",
@@ -39308,7 +39325,7 @@
         }
      },
      "OfferingId": {
-        "target": "com.amazonaws.ec2#String",
+        "target": "com.amazonaws.ec2#OfferingId",
        "traits": {
          "aws.protocols#ec2QueryName": "OfferingId",
          "smithy.api#documentation": "The ID of the reservation. This remains the same regardless of which Dedicated Hosts are associated with it.",
@@ -40312,7 +40329,7 @@
       "Architecture": {
        "target": "com.amazonaws.ec2#String",
        "traits": {
-          "smithy.api#documentation": "

The architecture of the virtual machine. Valid values: i386 | x86_64 | arm64"
+          "smithy.api#documentation": "The architecture of the virtual machine. Valid values: i386 | x86_64"
        }
      },
      "ClientData": {

@@ -42946,7 +42963,7 @@
       "HttpTokens": {
          "target": "com.amazonaws.ec2#HttpTokensState",
          "traits": {
-            "smithy.api#documentation": "The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional. If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned. If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available."
+            "smithy.api#documentation": "The state of token usage for your instance metadata requests. If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned. If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available. Default: optional"
          }
        },
        "HttpPutResponseHopLimit": {
@@ -42958,7 +42975,7 @@
       "HttpEndpoint": {
          "target": "com.amazonaws.ec2#InstanceMetadataEndpointState",
          "traits": {

-            "smithy.api#documentation": "Enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled. If you specify a value of disabled, you will not be able to access your instance metadata."
+            "smithy.api#documentation": "Enables or disables the HTTP metadata endpoint on your instances. If you specify a value of disabled, you cannot access your instance metadata. Default: enabled"
          }
        },
        "HttpProtocolIpv6": {
@@ -42966,6 +42983,12 @@
         "traits": {
            "smithy.api#documentation": "

              Enables or disables the IPv6 endpoint for the instance metadata service.

              " } + }, + "InstanceMetadataTags": { + "target": "com.amazonaws.ec2#InstanceMetadataTagsState", + "traits": { + "smithy.api#documentation": "

Set to enabled to allow access to instance tags from the instance metadata. Set to disabled to turn off access to instance tags from the instance metadata. For more information, see Work with instance tags using the instance metadata. Default: disabled"
+        }
+      }
    },
    "traits": {

@@ -42987,7 +43010,7 @@
         "target": "com.amazonaws.ec2#HttpTokensState",
        "traits": {
          "aws.protocols#ec2QueryName": "HttpTokens",
-          "smithy.api#documentation": "The state of token usage for your instance metadata requests. If the parameter is not specified in the request, the default state is optional. If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned. If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available.",
+          "smithy.api#documentation": "The state of token usage for your instance metadata requests. If the state is optional, you can choose to retrieve instance metadata with or without a signed token header on your request. If you retrieve the IAM role credentials without a token, the version 1.0 role credentials are returned. If you retrieve the IAM role credentials using a valid signed token, the version 2.0 role credentials are returned. If the state is required, you must send a signed token header with any instance metadata retrieval requests. In this state, retrieving the IAM role credentials always returns the version 2.0 credentials; the version 1.0 credentials are not available. Default: optional",
          "smithy.api#xmlName": "httpTokens"
        }
      },
@@ -43003,7 +43026,7 @@
         "target": "com.amazonaws.ec2#InstanceMetadataEndpointState",
        "traits": {
          "aws.protocols#ec2QueryName": "HttpEndpoint",

-          "smithy.api#documentation": "Indicates whether the HTTP metadata endpoint on your instances is enabled or disabled.",
+          "smithy.api#documentation": "Indicates whether the HTTP metadata endpoint on your instances is enabled or disabled. If the value is disabled, you cannot access your instance metadata.",
          "smithy.api#xmlName": "httpEndpoint"
        }
      },
@@ -43014,6 +43037,14 @@
         "smithy.api#documentation": "

              Indicates whether the IPv6 endpoint for the instance metadata service is enabled or disabled.

              ", "smithy.api#xmlName": "httpProtocolIpv6" } + }, + "InstanceMetadataTags": { + "target": "com.amazonaws.ec2#InstanceMetadataTagsState", + "traits": { + "aws.protocols#ec2QueryName": "InstanceMetadataTags", + "smithy.api#documentation": "

Indicates whether access to instance tags from the instance metadata is enabled or disabled. For more information, see Work with instance tags using the instance metadata.
              ", + "smithy.api#xmlName": "instanceMetadataTags" + } } }, "traits": { @@ -43050,6 +43081,21 @@ ] } }, + "com.amazonaws.ec2#InstanceMetadataTagsState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "disabled", + "name": "disabled" + }, + { + "value": "enabled", + "name": "enabled" + } + ] + } + }, "com.amazonaws.ec2#InstanceMonitoring": { "type": "structure", "members": { @@ -44318,1476 +44364,1464 @@ "traits": { "smithy.api#enum": [ { - "value": "t1.micro", - "name": "t1_micro" + "value": "a1.medium", + "name": "a1_medium" }, { - "value": "t2.nano", - "name": "t2_nano" + "value": "a1.large", + "name": "a1_large" }, { - "value": "t2.micro", - "name": "t2_micro" + "value": "a1.xlarge", + "name": "a1_xlarge" }, { - "value": "t2.small", - "name": "t2_small" + "value": "a1.2xlarge", + "name": "a1_2xlarge" }, { - "value": "t2.medium", - "name": "t2_medium" + "value": "a1.4xlarge", + "name": "a1_4xlarge" }, { - "value": "t2.large", - "name": "t2_large" + "value": "a1.metal", + "name": "a1_metal" }, { - "value": "t2.xlarge", - "name": "t2_xlarge" + "value": "c1.medium", + "name": "c1_medium" }, { - "value": "t2.2xlarge", - "name": "t2_2xlarge" + "value": "c1.xlarge", + "name": "c1_xlarge" }, { - "value": "t3.nano", - "name": "t3_nano" + "value": "c3.large", + "name": "c3_large" }, { - "value": "t3.micro", - "name": "t3_micro" + "value": "c3.xlarge", + "name": "c3_xlarge" }, { - "value": "t3.small", - "name": "t3_small" + "value": "c3.2xlarge", + "name": "c3_2xlarge" }, { - "value": "t3.medium", - "name": "t3_medium" + "value": "c3.4xlarge", + "name": "c3_4xlarge" }, { - "value": "t3.large", - "name": "t3_large" + "value": "c3.8xlarge", + "name": "c3_8xlarge" }, { - "value": "t3.xlarge", - "name": "t3_xlarge" + "value": "c4.large", + "name": "c4_large" }, { - "value": "t3.2xlarge", - "name": "t3_2xlarge" + "value": "c4.xlarge", + "name": "c4_xlarge" }, { - "value": "t3a.nano", - "name": "t3a_nano" + "value": "c4.2xlarge", + "name": "c4_2xlarge" }, { - "value": "t3a.micro", - "name": "t3a_micro" + "value": "c4.4xlarge", + "name": "c4_4xlarge" }, { - "value": "t3a.small", - "name": "t3a_small" + "value": "c4.8xlarge", + "name": "c4_8xlarge" }, { - "value": "t3a.medium", - "name": "t3a_medium" + "value": "c5.large", + "name": "c5_large" }, { - "value": "t3a.large", - "name": "t3a_large" + "value": "c5.xlarge", + "name": "c5_xlarge" }, { - "value": "t3a.xlarge", - "name": "t3a_xlarge" + "value": "c5.2xlarge", + "name": "c5_2xlarge" }, { - "value": "t3a.2xlarge", - "name": "t3a_2xlarge" + "value": "c5.4xlarge", + "name": "c5_4xlarge" }, { - "value": "t4g.nano", - "name": "t4g_nano" + "value": "c5.9xlarge", + "name": "c5_9xlarge" }, { - "value": "t4g.micro", - "name": "t4g_micro" + "value": "c5.12xlarge", + "name": "c5_12xlarge" }, { - "value": "t4g.small", - "name": "t4g_small" + "value": "c5.18xlarge", + "name": "c5_18xlarge" }, { - "value": "t4g.medium", - "name": "t4g_medium" + "value": "c5.24xlarge", + "name": "c5_24xlarge" }, { - "value": "t4g.large", - "name": "t4g_large" + "value": "c5.metal", + "name": "c5_metal" }, { - "value": "t4g.xlarge", - "name": "t4g_xlarge" + "value": "c5a.large", + "name": "c5a_large" }, { - "value": "t4g.2xlarge", - "name": "t4g_2xlarge" + "value": "c5a.xlarge", + "name": "c5a_xlarge" }, { - "value": "m1.small", - "name": "m1_small" + "value": "c5a.2xlarge", + "name": "c5a_2xlarge" }, { - "value": "m1.medium", - "name": "m1_medium" + "value": "c5a.4xlarge", + "name": "c5a_4xlarge" }, { - "value": "m1.large", - "name": 
"m1_large" + "value": "c5a.8xlarge", + "name": "c5a_8xlarge" }, { - "value": "m1.xlarge", - "name": "m1_xlarge" + "value": "c5a.12xlarge", + "name": "c5a_12xlarge" }, { - "value": "m3.medium", - "name": "m3_medium" + "value": "c5a.16xlarge", + "name": "c5a_16xlarge" }, { - "value": "m3.large", - "name": "m3_large" + "value": "c5a.24xlarge", + "name": "c5a_24xlarge" }, { - "value": "m3.xlarge", - "name": "m3_xlarge" + "value": "c5ad.large", + "name": "c5ad_large" }, { - "value": "m3.2xlarge", - "name": "m3_2xlarge" + "value": "c5ad.xlarge", + "name": "c5ad_xlarge" }, { - "value": "m4.large", - "name": "m4_large" + "value": "c5ad.2xlarge", + "name": "c5ad_2xlarge" }, { - "value": "m4.xlarge", - "name": "m4_xlarge" + "value": "c5ad.4xlarge", + "name": "c5ad_4xlarge" }, { - "value": "m4.2xlarge", - "name": "m4_2xlarge" + "value": "c5ad.8xlarge", + "name": "c5ad_8xlarge" }, { - "value": "m4.4xlarge", - "name": "m4_4xlarge" + "value": "c5ad.12xlarge", + "name": "c5ad_12xlarge" }, { - "value": "m4.10xlarge", - "name": "m4_10xlarge" + "value": "c5ad.16xlarge", + "name": "c5ad_16xlarge" }, { - "value": "m4.16xlarge", - "name": "m4_16xlarge" + "value": "c5ad.24xlarge", + "name": "c5ad_24xlarge" }, { - "value": "m2.xlarge", - "name": "m2_xlarge" + "value": "c5d.large", + "name": "c5d_large" }, { - "value": "m2.2xlarge", - "name": "m2_2xlarge" + "value": "c5d.xlarge", + "name": "c5d_xlarge" }, { - "value": "m2.4xlarge", - "name": "m2_4xlarge" + "value": "c5d.2xlarge", + "name": "c5d_2xlarge" }, { - "value": "cr1.8xlarge", - "name": "cr1_8xlarge" + "value": "c5d.4xlarge", + "name": "c5d_4xlarge" }, { - "value": "r3.large", - "name": "r3_large" + "value": "c5d.9xlarge", + "name": "c5d_9xlarge" }, { - "value": "r3.xlarge", - "name": "r3_xlarge" + "value": "c5d.12xlarge", + "name": "c5d_12xlarge" }, { - "value": "r3.2xlarge", - "name": "r3_2xlarge" + "value": "c5d.18xlarge", + "name": "c5d_18xlarge" }, { - "value": "r3.4xlarge", - "name": "r3_4xlarge" + "value": "c5d.24xlarge", + "name": "c5d_24xlarge" }, { - "value": "r3.8xlarge", - "name": "r3_8xlarge" + "value": "c5d.metal", + "name": "c5d_metal" }, { - "value": "r4.large", - "name": "r4_large" + "value": "c5n.large", + "name": "c5n_large" }, { - "value": "r4.xlarge", - "name": "r4_xlarge" + "value": "c5n.xlarge", + "name": "c5n_xlarge" }, { - "value": "r4.2xlarge", - "name": "r4_2xlarge" + "value": "c5n.2xlarge", + "name": "c5n_2xlarge" }, { - "value": "r4.4xlarge", - "name": "r4_4xlarge" + "value": "c5n.4xlarge", + "name": "c5n_4xlarge" }, { - "value": "r4.8xlarge", - "name": "r4_8xlarge" + "value": "c5n.9xlarge", + "name": "c5n_9xlarge" }, { - "value": "r4.16xlarge", - "name": "r4_16xlarge" + "value": "c5n.18xlarge", + "name": "c5n_18xlarge" }, { - "value": "r5.large", - "name": "r5_large" + "value": "c5n.metal", + "name": "c5n_metal" }, { - "value": "r5.xlarge", - "name": "r5_xlarge" + "value": "c6g.medium", + "name": "c6g_medium" }, { - "value": "r5.2xlarge", - "name": "r5_2xlarge" + "value": "c6g.large", + "name": "c6g_large" }, { - "value": "r5.4xlarge", - "name": "r5_4xlarge" + "value": "c6g.xlarge", + "name": "c6g_xlarge" }, { - "value": "r5.8xlarge", - "name": "r5_8xlarge" + "value": "c6g.2xlarge", + "name": "c6g_2xlarge" }, { - "value": "r5.12xlarge", - "name": "r5_12xlarge" + "value": "c6g.4xlarge", + "name": "c6g_4xlarge" }, { - "value": "r5.16xlarge", - "name": "r5_16xlarge" + "value": "c6g.8xlarge", + "name": "c6g_8xlarge" }, { - "value": "r5.24xlarge", - "name": "r5_24xlarge" + "value": "c6g.12xlarge", + "name": "c6g_12xlarge" }, { - 
"value": "r5.metal", - "name": "r5_metal" + "value": "c6g.16xlarge", + "name": "c6g_16xlarge" }, { - "value": "r5a.large", - "name": "r5a_large" + "value": "c6g.metal", + "name": "c6g_metal" }, { - "value": "r5a.xlarge", - "name": "r5a_xlarge" + "value": "c6gd.medium", + "name": "c6gd_medium" }, { - "value": "r5a.2xlarge", - "name": "r5a_2xlarge" + "value": "c6gd.large", + "name": "c6gd_large" }, { - "value": "r5a.4xlarge", - "name": "r5a_4xlarge" + "value": "c6gd.xlarge", + "name": "c6gd_xlarge" }, { - "value": "r5a.8xlarge", - "name": "r5a_8xlarge" + "value": "c6gd.2xlarge", + "name": "c6gd_2xlarge" }, { - "value": "r5a.12xlarge", - "name": "r5a_12xlarge" + "value": "c6gd.4xlarge", + "name": "c6gd_4xlarge" }, { - "value": "r5a.16xlarge", - "name": "r5a_16xlarge" + "value": "c6gd.8xlarge", + "name": "c6gd_8xlarge" }, { - "value": "r5a.24xlarge", - "name": "r5a_24xlarge" + "value": "c6gd.12xlarge", + "name": "c6gd_12xlarge" }, { - "value": "r5b.large", - "name": "r5b_large" + "value": "c6gd.16xlarge", + "name": "c6gd_16xlarge" }, { - "value": "r5b.xlarge", - "name": "r5b_xlarge" + "value": "c6gd.metal", + "name": "c6gd_metal" }, { - "value": "r5b.2xlarge", - "name": "r5b_2xlarge" + "value": "c6gn.medium", + "name": "c6gn_medium" }, { - "value": "r5b.4xlarge", - "name": "r5b_4xlarge" + "value": "c6gn.large", + "name": "c6gn_large" }, { - "value": "r5b.8xlarge", - "name": "r5b_8xlarge" + "value": "c6gn.xlarge", + "name": "c6gn_xlarge" }, { - "value": "r5b.12xlarge", - "name": "r5b_12xlarge" + "value": "c6gn.2xlarge", + "name": "c6gn_2xlarge" }, { - "value": "r5b.16xlarge", - "name": "r5b_16xlarge" + "value": "c6gn.4xlarge", + "name": "c6gn_4xlarge" }, { - "value": "r5b.24xlarge", - "name": "r5b_24xlarge" + "value": "c6gn.8xlarge", + "name": "c6gn_8xlarge" }, { - "value": "r5b.metal", - "name": "r5b_metal" + "value": "c6gn.12xlarge", + "name": "c6gn_12xlarge" }, { - "value": "r5d.large", - "name": "r5d_large" + "value": "c6gn.16xlarge", + "name": "c6gn_16xlarge" }, { - "value": "r5d.xlarge", - "name": "r5d_xlarge" + "value": "c6i.large", + "name": "c6i_large" }, { - "value": "r5d.2xlarge", - "name": "r5d_2xlarge" + "value": "c6i.xlarge", + "name": "c6i_xlarge" }, { - "value": "r5d.4xlarge", - "name": "r5d_4xlarge" + "value": "c6i.2xlarge", + "name": "c6i_2xlarge" }, { - "value": "r5d.8xlarge", - "name": "r5d_8xlarge" + "value": "c6i.4xlarge", + "name": "c6i_4xlarge" }, { - "value": "r5d.12xlarge", - "name": "r5d_12xlarge" + "value": "c6i.8xlarge", + "name": "c6i_8xlarge" }, { - "value": "r5d.16xlarge", - "name": "r5d_16xlarge" + "value": "c6i.12xlarge", + "name": "c6i_12xlarge" }, { - "value": "r5d.24xlarge", - "name": "r5d_24xlarge" + "value": "c6i.16xlarge", + "name": "c6i_16xlarge" }, { - "value": "r5d.metal", - "name": "r5d_metal" + "value": "c6i.24xlarge", + "name": "c6i_24xlarge" }, { - "value": "r5ad.large", - "name": "r5ad_large" + "value": "c6i.32xlarge", + "name": "c6i_32xlarge" }, { - "value": "r5ad.xlarge", - "name": "r5ad_xlarge" + "value": "cc1.4xlarge", + "name": "cc1_4xlarge" }, { - "value": "r5ad.2xlarge", - "name": "r5ad_2xlarge" + "value": "cc2.8xlarge", + "name": "cc2_8xlarge" }, { - "value": "r5ad.4xlarge", - "name": "r5ad_4xlarge" + "value": "cg1.4xlarge", + "name": "cg1_4xlarge" }, { - "value": "r5ad.8xlarge", - "name": "r5ad_8xlarge" + "value": "cr1.8xlarge", + "name": "cr1_8xlarge" }, { - "value": "r5ad.12xlarge", - "name": "r5ad_12xlarge" + "value": "d2.xlarge", + "name": "d2_xlarge" }, { - "value": "r5ad.16xlarge", - "name": "r5ad_16xlarge" + "value": "d2.2xlarge", 
+ "name": "d2_2xlarge" }, { - "value": "r5ad.24xlarge", - "name": "r5ad_24xlarge" + "value": "d2.4xlarge", + "name": "d2_4xlarge" }, { - "value": "r6g.metal", - "name": "r6g_metal" + "value": "d2.8xlarge", + "name": "d2_8xlarge" }, { - "value": "r6g.medium", - "name": "r6g_medium" + "value": "d3.xlarge", + "name": "d3_xlarge" }, { - "value": "r6g.large", - "name": "r6g_large" + "value": "d3.2xlarge", + "name": "d3_2xlarge" }, { - "value": "r6g.xlarge", - "name": "r6g_xlarge" + "value": "d3.4xlarge", + "name": "d3_4xlarge" }, { - "value": "r6g.2xlarge", - "name": "r6g_2xlarge" + "value": "d3.8xlarge", + "name": "d3_8xlarge" }, { - "value": "r6g.4xlarge", - "name": "r6g_4xlarge" + "value": "d3en.xlarge", + "name": "d3en_xlarge" }, { - "value": "r6g.8xlarge", - "name": "r6g_8xlarge" + "value": "d3en.2xlarge", + "name": "d3en_2xlarge" }, { - "value": "r6g.12xlarge", - "name": "r6g_12xlarge" + "value": "d3en.4xlarge", + "name": "d3en_4xlarge" }, { - "value": "r6g.16xlarge", - "name": "r6g_16xlarge" + "value": "d3en.6xlarge", + "name": "d3en_6xlarge" }, { - "value": "r6gd.metal", - "name": "r6gd_metal" + "value": "d3en.8xlarge", + "name": "d3en_8xlarge" }, { - "value": "r6gd.medium", - "name": "r6gd_medium" + "value": "d3en.12xlarge", + "name": "d3en_12xlarge" }, { - "value": "r6gd.large", - "name": "r6gd_large" + "value": "dl1.24xlarge", + "name": "dl1_24xlarge" }, { - "value": "r6gd.xlarge", - "name": "r6gd_xlarge" + "value": "f1.2xlarge", + "name": "f1_2xlarge" }, { - "value": "r6gd.2xlarge", - "name": "r6gd_2xlarge" + "value": "f1.4xlarge", + "name": "f1_4xlarge" }, { - "value": "r6gd.4xlarge", - "name": "r6gd_4xlarge" + "value": "f1.16xlarge", + "name": "f1_16xlarge" }, { - "value": "r6gd.8xlarge", - "name": "r6gd_8xlarge" + "value": "g2.2xlarge", + "name": "g2_2xlarge" }, { - "value": "r6gd.12xlarge", - "name": "r6gd_12xlarge" + "value": "g2.8xlarge", + "name": "g2_8xlarge" }, { - "value": "r6gd.16xlarge", - "name": "r6gd_16xlarge" + "value": "g3.4xlarge", + "name": "g3_4xlarge" }, { - "value": "x1.16xlarge", - "name": "x1_16xlarge" - }, - { - "value": "x1.32xlarge", - "name": "x1_32xlarge" - }, - { - "value": "x1e.xlarge", - "name": "x1e_xlarge" - }, - { - "value": "x1e.2xlarge", - "name": "x1e_2xlarge" + "value": "g3.8xlarge", + "name": "g3_8xlarge" }, { - "value": "x1e.4xlarge", - "name": "x1e_4xlarge" + "value": "g3.16xlarge", + "name": "g3_16xlarge" }, { - "value": "x1e.8xlarge", - "name": "x1e_8xlarge" + "value": "g3s.xlarge", + "name": "g3s_xlarge" }, { - "value": "x1e.16xlarge", - "name": "x1e_16xlarge" + "value": "g4ad.xlarge", + "name": "g4ad_xlarge" }, { - "value": "x1e.32xlarge", - "name": "x1e_32xlarge" + "value": "g4ad.2xlarge", + "name": "g4ad_2xlarge" }, { - "value": "i2.xlarge", - "name": "i2_xlarge" + "value": "g4ad.4xlarge", + "name": "g4ad_4xlarge" }, { - "value": "i2.2xlarge", - "name": "i2_2xlarge" + "value": "g4ad.8xlarge", + "name": "g4ad_8xlarge" }, { - "value": "i2.4xlarge", - "name": "i2_4xlarge" + "value": "g4ad.16xlarge", + "name": "g4ad_16xlarge" }, { - "value": "i2.8xlarge", - "name": "i2_8xlarge" + "value": "g4dn.xlarge", + "name": "g4dn_xlarge" }, { - "value": "i3.large", - "name": "i3_large" + "value": "g4dn.2xlarge", + "name": "g4dn_2xlarge" }, { - "value": "i3.xlarge", - "name": "i3_xlarge" + "value": "g4dn.4xlarge", + "name": "g4dn_4xlarge" }, { - "value": "i3.2xlarge", - "name": "i3_2xlarge" + "value": "g4dn.8xlarge", + "name": "g4dn_8xlarge" }, { - "value": "i3.4xlarge", - "name": "i3_4xlarge" + "value": "g4dn.12xlarge", + "name": "g4dn_12xlarge" }, { 
- "value": "i3.8xlarge", - "name": "i3_8xlarge" + "value": "g4dn.16xlarge", + "name": "g4dn_16xlarge" }, { - "value": "i3.16xlarge", - "name": "i3_16xlarge" + "value": "g4dn.metal", + "name": "g4dn_metal" }, { - "value": "i3.metal", - "name": "i3_metal" + "value": "g5.xlarge", + "name": "g5_xlarge" }, { - "value": "i3en.large", - "name": "i3en_large" + "value": "g5.2xlarge", + "name": "g5_2xlarge" }, { - "value": "i3en.xlarge", - "name": "i3en_xlarge" + "value": "g5.4xlarge", + "name": "g5_4xlarge" }, { - "value": "i3en.2xlarge", - "name": "i3en_2xlarge" + "value": "g5.8xlarge", + "name": "g5_8xlarge" }, { - "value": "i3en.3xlarge", - "name": "i3en_3xlarge" + "value": "g5.12xlarge", + "name": "g5_12xlarge" }, { - "value": "i3en.6xlarge", - "name": "i3en_6xlarge" + "value": "g5.16xlarge", + "name": "g5_16xlarge" }, { - "value": "i3en.12xlarge", - "name": "i3en_12xlarge" + "value": "g5.24xlarge", + "name": "g5_24xlarge" }, { - "value": "i3en.24xlarge", - "name": "i3en_24xlarge" + "value": "g5.48xlarge", + "name": "g5_48xlarge" }, { - "value": "i3en.metal", - "name": "i3en_metal" + "value": "g5g.xlarge", + "name": "g5g_xlarge" }, { - "value": "hi1.4xlarge", - "name": "hi1_4xlarge" + "value": "g5g.2xlarge", + "name": "g5g_2xlarge" }, { - "value": "hs1.8xlarge", - "name": "hs1_8xlarge" + "value": "g5g.4xlarge", + "name": "g5g_4xlarge" }, { - "value": "c1.medium", - "name": "c1_medium" + "value": "g5g.8xlarge", + "name": "g5g_8xlarge" }, { - "value": "c1.xlarge", - "name": "c1_xlarge" + "value": "g5g.16xlarge", + "name": "g5g_16xlarge" }, { - "value": "c3.large", - "name": "c3_large" + "value": "g5g.metal", + "name": "g5g_metal" }, { - "value": "c3.xlarge", - "name": "c3_xlarge" + "value": "hi1.4xlarge", + "name": "hi1_4xlarge" }, { - "value": "c3.2xlarge", - "name": "c3_2xlarge" + "value": "hs1.8xlarge", + "name": "hs1_8xlarge" }, { - "value": "c3.4xlarge", - "name": "c3_4xlarge" + "value": "h1.2xlarge", + "name": "h1_2xlarge" }, { - "value": "c3.8xlarge", - "name": "c3_8xlarge" + "value": "h1.4xlarge", + "name": "h1_4xlarge" }, { - "value": "c4.large", - "name": "c4_large" + "value": "h1.8xlarge", + "name": "h1_8xlarge" }, { - "value": "c4.xlarge", - "name": "c4_xlarge" + "value": "h1.16xlarge", + "name": "h1_16xlarge" }, { - "value": "c4.2xlarge", - "name": "c4_2xlarge" + "value": "i2.xlarge", + "name": "i2_xlarge" }, { - "value": "c4.4xlarge", - "name": "c4_4xlarge" + "value": "i2.2xlarge", + "name": "i2_2xlarge" }, { - "value": "c4.8xlarge", - "name": "c4_8xlarge" + "value": "i2.4xlarge", + "name": "i2_4xlarge" }, { - "value": "c5.large", - "name": "c5_large" + "value": "i2.8xlarge", + "name": "i2_8xlarge" }, { - "value": "c5.xlarge", - "name": "c5_xlarge" + "value": "i3.large", + "name": "i3_large" }, { - "value": "c5.2xlarge", - "name": "c5_2xlarge" + "value": "i3.xlarge", + "name": "i3_xlarge" }, { - "value": "c5.4xlarge", - "name": "c5_4xlarge" + "value": "i3.2xlarge", + "name": "i3_2xlarge" }, { - "value": "c5.9xlarge", - "name": "c5_9xlarge" + "value": "i3.4xlarge", + "name": "i3_4xlarge" }, { - "value": "c5.12xlarge", - "name": "c5_12xlarge" + "value": "i3.8xlarge", + "name": "i3_8xlarge" }, { - "value": "c5.18xlarge", - "name": "c5_18xlarge" + "value": "i3.16xlarge", + "name": "i3_16xlarge" }, { - "value": "c5.24xlarge", - "name": "c5_24xlarge" + "value": "i3.metal", + "name": "i3_metal" }, { - "value": "c5.metal", - "name": "c5_metal" + "value": "i3en.large", + "name": "i3en_large" }, { - "value": "c5a.large", - "name": "c5a_large" + "value": "i3en.xlarge", + "name": "i3en_xlarge" 
}, { - "value": "c5a.xlarge", - "name": "c5a_xlarge" + "value": "i3en.2xlarge", + "name": "i3en_2xlarge" }, { - "value": "c5a.2xlarge", - "name": "c5a_2xlarge" + "value": "i3en.3xlarge", + "name": "i3en_3xlarge" }, { - "value": "c5a.4xlarge", - "name": "c5a_4xlarge" + "value": "i3en.6xlarge", + "name": "i3en_6xlarge" }, { - "value": "c5a.8xlarge", - "name": "c5a_8xlarge" + "value": "i3en.12xlarge", + "name": "i3en_12xlarge" }, { - "value": "c5a.12xlarge", - "name": "c5a_12xlarge" + "value": "i3en.24xlarge", + "name": "i3en_24xlarge" }, { - "value": "c5a.16xlarge", - "name": "c5a_16xlarge" + "value": "i3en.metal", + "name": "i3en_metal" }, { - "value": "c5a.24xlarge", - "name": "c5a_24xlarge" + "value": "im4gn.large", + "name": "im4gn_large" }, { - "value": "c5ad.large", - "name": "c5ad_large" + "value": "im4gn.xlarge", + "name": "im4gn_xlarge" }, { - "value": "c5ad.xlarge", - "name": "c5ad_xlarge" + "value": "im4gn.2xlarge", + "name": "im4gn_2xlarge" }, { - "value": "c5ad.2xlarge", - "name": "c5ad_2xlarge" + "value": "im4gn.4xlarge", + "name": "im4gn_4xlarge" }, { - "value": "c5ad.4xlarge", - "name": "c5ad_4xlarge" + "value": "im4gn.8xlarge", + "name": "im4gn_8xlarge" }, { - "value": "c5ad.8xlarge", - "name": "c5ad_8xlarge" + "value": "im4gn.16xlarge", + "name": "im4gn_16xlarge" }, { - "value": "c5ad.12xlarge", - "name": "c5ad_12xlarge" + "value": "inf1.xlarge", + "name": "inf1_xlarge" }, { - "value": "c5ad.16xlarge", - "name": "c5ad_16xlarge" + "value": "inf1.2xlarge", + "name": "inf1_2xlarge" }, { - "value": "c5ad.24xlarge", - "name": "c5ad_24xlarge" + "value": "inf1.6xlarge", + "name": "inf1_6xlarge" }, { - "value": "c5d.large", - "name": "c5d_large" + "value": "inf1.24xlarge", + "name": "inf1_24xlarge" }, { - "value": "c5d.xlarge", - "name": "c5d_xlarge" + "value": "is4gen.medium", + "name": "is4gen_medium" }, { - "value": "c5d.2xlarge", - "name": "c5d_2xlarge" + "value": "is4gen.large", + "name": "is4gen_large" }, { - "value": "c5d.4xlarge", - "name": "c5d_4xlarge" + "value": "is4gen.xlarge", + "name": "is4gen_xlarge" }, { - "value": "c5d.9xlarge", - "name": "c5d_9xlarge" + "value": "is4gen.2xlarge", + "name": "is4gen_2xlarge" }, { - "value": "c5d.12xlarge", - "name": "c5d_12xlarge" + "value": "is4gen.4xlarge", + "name": "is4gen_4xlarge" }, { - "value": "c5d.18xlarge", - "name": "c5d_18xlarge" + "value": "is4gen.8xlarge", + "name": "is4gen_8xlarge" }, { - "value": "c5d.24xlarge", - "name": "c5d_24xlarge" + "value": "m1.small", + "name": "m1_small" }, { - "value": "c5d.metal", - "name": "c5d_metal" + "value": "m1.medium", + "name": "m1_medium" }, { - "value": "c5n.large", - "name": "c5n_large" + "value": "m1.large", + "name": "m1_large" }, { - "value": "c5n.xlarge", - "name": "c5n_xlarge" + "value": "m1.xlarge", + "name": "m1_xlarge" }, { - "value": "c5n.2xlarge", - "name": "c5n_2xlarge" + "value": "m2.xlarge", + "name": "m2_xlarge" }, { - "value": "c5n.4xlarge", - "name": "c5n_4xlarge" + "value": "m2.2xlarge", + "name": "m2_2xlarge" }, { - "value": "c5n.9xlarge", - "name": "c5n_9xlarge" + "value": "m2.4xlarge", + "name": "m2_4xlarge" }, { - "value": "c5n.18xlarge", - "name": "c5n_18xlarge" + "value": "m3.medium", + "name": "m3_medium" }, { - "value": "c5n.metal", - "name": "c5n_metal" + "value": "m3.large", + "name": "m3_large" }, { - "value": "c6g.metal", - "name": "c6g_metal" + "value": "m3.xlarge", + "name": "m3_xlarge" }, { - "value": "c6g.medium", - "name": "c6g_medium" + "value": "m3.2xlarge", + "name": "m3_2xlarge" }, { - "value": "c6g.large", - "name": "c6g_large" + "value": 
"m4.large", + "name": "m4_large" }, { - "value": "c6g.xlarge", - "name": "c6g_xlarge" + "value": "m4.xlarge", + "name": "m4_xlarge" }, { - "value": "c6g.2xlarge", - "name": "c6g_2xlarge" + "value": "m4.2xlarge", + "name": "m4_2xlarge" }, { - "value": "c6g.4xlarge", - "name": "c6g_4xlarge" + "value": "m4.4xlarge", + "name": "m4_4xlarge" }, { - "value": "c6g.8xlarge", - "name": "c6g_8xlarge" + "value": "m4.10xlarge", + "name": "m4_10xlarge" }, { - "value": "c6g.12xlarge", - "name": "c6g_12xlarge" + "value": "m4.16xlarge", + "name": "m4_16xlarge" }, { - "value": "c6g.16xlarge", - "name": "c6g_16xlarge" + "value": "m5.large", + "name": "m5_large" }, { - "value": "c6gd.metal", - "name": "c6gd_metal" + "value": "m5.xlarge", + "name": "m5_xlarge" }, { - "value": "c6gd.medium", - "name": "c6gd_medium" + "value": "m5.2xlarge", + "name": "m5_2xlarge" }, { - "value": "c6gd.large", - "name": "c6gd_large" + "value": "m5.4xlarge", + "name": "m5_4xlarge" }, { - "value": "c6gd.xlarge", - "name": "c6gd_xlarge" + "value": "m5.8xlarge", + "name": "m5_8xlarge" }, { - "value": "c6gd.2xlarge", - "name": "c6gd_2xlarge" + "value": "m5.12xlarge", + "name": "m5_12xlarge" }, { - "value": "c6gd.4xlarge", - "name": "c6gd_4xlarge" + "value": "m5.16xlarge", + "name": "m5_16xlarge" }, { - "value": "c6gd.8xlarge", - "name": "c6gd_8xlarge" + "value": "m5.24xlarge", + "name": "m5_24xlarge" }, { - "value": "c6gd.12xlarge", - "name": "c6gd_12xlarge" + "value": "m5.metal", + "name": "m5_metal" }, { - "value": "c6gd.16xlarge", - "name": "c6gd_16xlarge" + "value": "m5a.large", + "name": "m5a_large" }, { - "value": "c6gn.medium", - "name": "c6gn_medium" + "value": "m5a.xlarge", + "name": "m5a_xlarge" }, { - "value": "c6gn.large", - "name": "c6gn_large" + "value": "m5a.2xlarge", + "name": "m5a_2xlarge" }, { - "value": "c6gn.xlarge", - "name": "c6gn_xlarge" + "value": "m5a.4xlarge", + "name": "m5a_4xlarge" }, { - "value": "c6gn.2xlarge", - "name": "c6gn_2xlarge" + "value": "m5a.8xlarge", + "name": "m5a_8xlarge" }, { - "value": "c6gn.4xlarge", - "name": "c6gn_4xlarge" + "value": "m5a.12xlarge", + "name": "m5a_12xlarge" }, { - "value": "c6gn.8xlarge", - "name": "c6gn_8xlarge" + "value": "m5a.16xlarge", + "name": "m5a_16xlarge" }, { - "value": "c6gn.12xlarge", - "name": "c6gn_12xlarge" + "value": "m5a.24xlarge", + "name": "m5a_24xlarge" }, { - "value": "c6gn.16xlarge", - "name": "c6gn_16xlarge" + "value": "m5ad.large", + "name": "m5ad_large" }, { - "value": "c6i.large", - "name": "c6i_large" + "value": "m5ad.xlarge", + "name": "m5ad_xlarge" }, { - "value": "c6i.xlarge", - "name": "c6i_xlarge" + "value": "m5ad.2xlarge", + "name": "m5ad_2xlarge" }, { - "value": "c6i.2xlarge", - "name": "c6i_2xlarge" + "value": "m5ad.4xlarge", + "name": "m5ad_4xlarge" }, { - "value": "c6i.4xlarge", - "name": "c6i_4xlarge" + "value": "m5ad.8xlarge", + "name": "m5ad_8xlarge" }, { - "value": "c6i.8xlarge", - "name": "c6i_8xlarge" + "value": "m5ad.12xlarge", + "name": "m5ad_12xlarge" }, { - "value": "c6i.12xlarge", - "name": "c6i_12xlarge" + "value": "m5ad.16xlarge", + "name": "m5ad_16xlarge" }, { - "value": "c6i.16xlarge", - "name": "c6i_16xlarge" + "value": "m5ad.24xlarge", + "name": "m5ad_24xlarge" }, { - "value": "c6i.24xlarge", - "name": "c6i_24xlarge" + "value": "m5d.large", + "name": "m5d_large" }, { - "value": "c6i.32xlarge", - "name": "c6i_32xlarge" + "value": "m5d.xlarge", + "name": "m5d_xlarge" }, { - "value": "cc1.4xlarge", - "name": "cc1_4xlarge" + "value": "m5d.2xlarge", + "name": "m5d_2xlarge" }, { - "value": "cc2.8xlarge", - "name": 
"cc2_8xlarge" + "value": "m5d.4xlarge", + "name": "m5d_4xlarge" }, { - "value": "g2.2xlarge", - "name": "g2_2xlarge" + "value": "m5d.8xlarge", + "name": "m5d_8xlarge" }, { - "value": "g2.8xlarge", - "name": "g2_8xlarge" + "value": "m5d.12xlarge", + "name": "m5d_12xlarge" }, { - "value": "g3.4xlarge", - "name": "g3_4xlarge" + "value": "m5d.16xlarge", + "name": "m5d_16xlarge" }, { - "value": "g3.8xlarge", - "name": "g3_8xlarge" + "value": "m5d.24xlarge", + "name": "m5d_24xlarge" }, { - "value": "g3.16xlarge", - "name": "g3_16xlarge" + "value": "m5d.metal", + "name": "m5d_metal" }, { - "value": "g3s.xlarge", - "name": "g3s_xlarge" + "value": "m5dn.large", + "name": "m5dn_large" }, { - "value": "g4ad.xlarge", - "name": "g4ad_xlarge" + "value": "m5dn.xlarge", + "name": "m5dn_xlarge" }, { - "value": "g4ad.2xlarge", - "name": "g4ad_2xlarge" + "value": "m5dn.2xlarge", + "name": "m5dn_2xlarge" }, { - "value": "g4ad.4xlarge", - "name": "g4ad_4xlarge" + "value": "m5dn.4xlarge", + "name": "m5dn_4xlarge" }, { - "value": "g4ad.8xlarge", - "name": "g4ad_8xlarge" + "value": "m5dn.8xlarge", + "name": "m5dn_8xlarge" }, { - "value": "g4ad.16xlarge", - "name": "g4ad_16xlarge" + "value": "m5dn.12xlarge", + "name": "m5dn_12xlarge" }, { - "value": "g4dn.xlarge", - "name": "g4dn_xlarge" + "value": "m5dn.16xlarge", + "name": "m5dn_16xlarge" }, { - "value": "g4dn.2xlarge", - "name": "g4dn_2xlarge" + "value": "m5dn.24xlarge", + "name": "m5dn_24xlarge" }, { - "value": "g4dn.4xlarge", - "name": "g4dn_4xlarge" + "value": "m5dn.metal", + "name": "m5dn_metal" }, { - "value": "g4dn.8xlarge", - "name": "g4dn_8xlarge" + "value": "m5n.large", + "name": "m5n_large" }, { - "value": "g4dn.12xlarge", - "name": "g4dn_12xlarge" + "value": "m5n.xlarge", + "name": "m5n_xlarge" }, { - "value": "g4dn.16xlarge", - "name": "g4dn_16xlarge" + "value": "m5n.2xlarge", + "name": "m5n_2xlarge" }, { - "value": "g4dn.metal", - "name": "g4dn_metal" + "value": "m5n.4xlarge", + "name": "m5n_4xlarge" }, { - "value": "cg1.4xlarge", - "name": "cg1_4xlarge" + "value": "m5n.8xlarge", + "name": "m5n_8xlarge" }, { - "value": "p2.xlarge", - "name": "p2_xlarge" + "value": "m5n.12xlarge", + "name": "m5n_12xlarge" }, { - "value": "p2.8xlarge", - "name": "p2_8xlarge" + "value": "m5n.16xlarge", + "name": "m5n_16xlarge" }, { - "value": "p2.16xlarge", - "name": "p2_16xlarge" + "value": "m5n.24xlarge", + "name": "m5n_24xlarge" }, { - "value": "p3.2xlarge", - "name": "p3_2xlarge" + "value": "m5n.metal", + "name": "m5n_metal" }, { - "value": "p3.8xlarge", - "name": "p3_8xlarge" + "value": "m5zn.large", + "name": "m5zn_large" }, { - "value": "p3.16xlarge", - "name": "p3_16xlarge" + "value": "m5zn.xlarge", + "name": "m5zn_xlarge" }, { - "value": "p3dn.24xlarge", - "name": "p3dn_24xlarge" + "value": "m5zn.2xlarge", + "name": "m5zn_2xlarge" }, { - "value": "p4d.24xlarge", - "name": "p4d_24xlarge" + "value": "m5zn.3xlarge", + "name": "m5zn_3xlarge" }, { - "value": "d2.xlarge", - "name": "d2_xlarge" + "value": "m5zn.6xlarge", + "name": "m5zn_6xlarge" }, { - "value": "d2.2xlarge", - "name": "d2_2xlarge" + "value": "m5zn.12xlarge", + "name": "m5zn_12xlarge" }, { - "value": "d2.4xlarge", - "name": "d2_4xlarge" + "value": "m5zn.metal", + "name": "m5zn_metal" }, { - "value": "d2.8xlarge", - "name": "d2_8xlarge" + "value": "m6a.large", + "name": "m6a_large" }, { - "value": "d3.xlarge", - "name": "d3_xlarge" + "value": "m6a.xlarge", + "name": "m6a_xlarge" }, { - "value": "d3.2xlarge", - "name": "d3_2xlarge" + "value": "m6a.2xlarge", + "name": "m6a_2xlarge" }, { - "value": 
"d3.4xlarge", - "name": "d3_4xlarge" + "value": "m6a.4xlarge", + "name": "m6a_4xlarge" }, { - "value": "d3.8xlarge", - "name": "d3_8xlarge" + "value": "m6a.8xlarge", + "name": "m6a_8xlarge" }, { - "value": "d3en.xlarge", - "name": "d3en_xlarge" + "value": "m6a.12xlarge", + "name": "m6a_12xlarge" }, { - "value": "d3en.2xlarge", - "name": "d3en_2xlarge" + "value": "m6a.16xlarge", + "name": "m6a_16xlarge" }, { - "value": "d3en.4xlarge", - "name": "d3en_4xlarge" + "value": "m6a.24xlarge", + "name": "m6a_24xlarge" }, { - "value": "d3en.6xlarge", - "name": "d3en_6xlarge" + "value": "m6a.32xlarge", + "name": "m6a_32xlarge" }, { - "value": "d3en.8xlarge", - "name": "d3en_8xlarge" + "value": "m6a.48xlarge", + "name": "m6a_48xlarge" }, { - "value": "d3en.12xlarge", - "name": "d3en_12xlarge" + "value": "m6g.metal", + "name": "m6g_metal" }, { - "value": "dl1.24xlarge", - "name": "dl1_24xlarge" + "value": "m6g.medium", + "name": "m6g_medium" }, { - "value": "f1.2xlarge", - "name": "f1_2xlarge" + "value": "m6g.large", + "name": "m6g_large" }, { - "value": "f1.4xlarge", - "name": "f1_4xlarge" + "value": "m6g.xlarge", + "name": "m6g_xlarge" }, { - "value": "f1.16xlarge", - "name": "f1_16xlarge" + "value": "m6g.2xlarge", + "name": "m6g_2xlarge" }, { - "value": "m5.large", - "name": "m5_large" + "value": "m6g.4xlarge", + "name": "m6g_4xlarge" }, { - "value": "m5.xlarge", - "name": "m5_xlarge" + "value": "m6g.8xlarge", + "name": "m6g_8xlarge" }, { - "value": "m5.2xlarge", - "name": "m5_2xlarge" + "value": "m6g.12xlarge", + "name": "m6g_12xlarge" }, { - "value": "m5.4xlarge", - "name": "m5_4xlarge" + "value": "m6g.16xlarge", + "name": "m6g_16xlarge" }, { - "value": "m5.8xlarge", - "name": "m5_8xlarge" + "value": "m6gd.metal", + "name": "m6gd_metal" }, { - "value": "m5.12xlarge", - "name": "m5_12xlarge" + "value": "m6gd.medium", + "name": "m6gd_medium" }, { - "value": "m5.16xlarge", - "name": "m5_16xlarge" + "value": "m6gd.large", + "name": "m6gd_large" }, { - "value": "m5.24xlarge", - "name": "m5_24xlarge" + "value": "m6gd.xlarge", + "name": "m6gd_xlarge" }, { - "value": "m5.metal", - "name": "m5_metal" + "value": "m6gd.2xlarge", + "name": "m6gd_2xlarge" }, { - "value": "m5a.large", - "name": "m5a_large" + "value": "m6gd.4xlarge", + "name": "m6gd_4xlarge" }, { - "value": "m5a.xlarge", - "name": "m5a_xlarge" + "value": "m6gd.8xlarge", + "name": "m6gd_8xlarge" }, { - "value": "m5a.2xlarge", - "name": "m5a_2xlarge" + "value": "m6gd.12xlarge", + "name": "m6gd_12xlarge" }, { - "value": "m5a.4xlarge", - "name": "m5a_4xlarge" + "value": "m6gd.16xlarge", + "name": "m6gd_16xlarge" }, { - "value": "m5a.8xlarge", - "name": "m5a_8xlarge" + "value": "m6i.large", + "name": "m6i_large" }, { - "value": "m5a.12xlarge", - "name": "m5a_12xlarge" + "value": "m6i.xlarge", + "name": "m6i_xlarge" }, { - "value": "m5a.16xlarge", - "name": "m5a_16xlarge" + "value": "m6i.2xlarge", + "name": "m6i_2xlarge" }, { - "value": "m5a.24xlarge", - "name": "m5a_24xlarge" + "value": "m6i.4xlarge", + "name": "m6i_4xlarge" }, { - "value": "m5d.large", - "name": "m5d_large" + "value": "m6i.8xlarge", + "name": "m6i_8xlarge" }, { - "value": "m5d.xlarge", - "name": "m5d_xlarge" + "value": "m6i.12xlarge", + "name": "m6i_12xlarge" }, { - "value": "m5d.2xlarge", - "name": "m5d_2xlarge" + "value": "m6i.16xlarge", + "name": "m6i_16xlarge" }, { - "value": "m5d.4xlarge", - "name": "m5d_4xlarge" + "value": "m6i.24xlarge", + "name": "m6i_24xlarge" }, { - "value": "m5d.8xlarge", - "name": "m5d_8xlarge" + "value": "m6i.32xlarge", + "name": "m6i_32xlarge" }, { - 
"value": "m5d.12xlarge", - "name": "m5d_12xlarge" + "value": "mac1.metal", + "name": "mac1_metal" }, { - "value": "m5d.16xlarge", - "name": "m5d_16xlarge" + "value": "p2.xlarge", + "name": "p2_xlarge" }, { - "value": "m5d.24xlarge", - "name": "m5d_24xlarge" + "value": "p2.8xlarge", + "name": "p2_8xlarge" }, { - "value": "m5d.metal", - "name": "m5d_metal" + "value": "p2.16xlarge", + "name": "p2_16xlarge" }, { - "value": "m5ad.large", - "name": "m5ad_large" + "value": "p3.2xlarge", + "name": "p3_2xlarge" }, { - "value": "m5ad.xlarge", - "name": "m5ad_xlarge" + "value": "p3.8xlarge", + "name": "p3_8xlarge" }, { - "value": "m5ad.2xlarge", - "name": "m5ad_2xlarge" + "value": "p3.16xlarge", + "name": "p3_16xlarge" }, { - "value": "m5ad.4xlarge", - "name": "m5ad_4xlarge" + "value": "p3dn.24xlarge", + "name": "p3dn_24xlarge" }, { - "value": "m5ad.8xlarge", - "name": "m5ad_8xlarge" + "value": "p4d.24xlarge", + "name": "p4d_24xlarge" }, { - "value": "m5ad.12xlarge", - "name": "m5ad_12xlarge" + "value": "r3.large", + "name": "r3_large" }, { - "value": "m5ad.16xlarge", - "name": "m5ad_16xlarge" + "value": "r3.xlarge", + "name": "r3_xlarge" }, { - "value": "m5ad.24xlarge", - "name": "m5ad_24xlarge" + "value": "r3.2xlarge", + "name": "r3_2xlarge" }, { - "value": "m5zn.large", - "name": "m5zn_large" + "value": "r3.4xlarge", + "name": "r3_4xlarge" }, { - "value": "m5zn.xlarge", - "name": "m5zn_xlarge" + "value": "r3.8xlarge", + "name": "r3_8xlarge" }, { - "value": "m5zn.2xlarge", - "name": "m5zn_2xlarge" + "value": "r4.large", + "name": "r4_large" }, { - "value": "m5zn.3xlarge", - "name": "m5zn_3xlarge" + "value": "r4.xlarge", + "name": "r4_xlarge" }, { - "value": "m5zn.6xlarge", - "name": "m5zn_6xlarge" + "value": "r4.2xlarge", + "name": "r4_2xlarge" }, { - "value": "m5zn.12xlarge", - "name": "m5zn_12xlarge" + "value": "r4.4xlarge", + "name": "r4_4xlarge" }, { - "value": "m5zn.metal", - "name": "m5zn_metal" + "value": "r4.8xlarge", + "name": "r4_8xlarge" }, { - "value": "h1.2xlarge", - "name": "h1_2xlarge" + "value": "r4.16xlarge", + "name": "r4_16xlarge" }, { - "value": "h1.4xlarge", - "name": "h1_4xlarge" + "value": "r5.large", + "name": "r5_large" }, { - "value": "h1.8xlarge", - "name": "h1_8xlarge" + "value": "r5.xlarge", + "name": "r5_xlarge" }, { - "value": "h1.16xlarge", - "name": "h1_16xlarge" + "value": "r5.2xlarge", + "name": "r5_2xlarge" }, { - "value": "z1d.large", - "name": "z1d_large" + "value": "r5.4xlarge", + "name": "r5_4xlarge" }, { - "value": "z1d.xlarge", - "name": "z1d_xlarge" + "value": "r5.8xlarge", + "name": "r5_8xlarge" }, { - "value": "z1d.2xlarge", - "name": "z1d_2xlarge" + "value": "r5.12xlarge", + "name": "r5_12xlarge" }, { - "value": "z1d.3xlarge", - "name": "z1d_3xlarge" + "value": "r5.16xlarge", + "name": "r5_16xlarge" }, { - "value": "z1d.6xlarge", - "name": "z1d_6xlarge" + "value": "r5.24xlarge", + "name": "r5_24xlarge" }, { - "value": "z1d.12xlarge", - "name": "z1d_12xlarge" + "value": "r5.metal", + "name": "r5_metal" }, { - "value": "z1d.metal", - "name": "z1d_metal" + "value": "r5a.large", + "name": "r5a_large" }, { - "value": "u-6tb1.56xlarge", - "name": "u_6tb1_56xlarge" + "value": "r5a.xlarge", + "name": "r5a_xlarge" }, { - "value": "u-6tb1.112xlarge", - "name": "u_6tb1_112xlarge" + "value": "r5a.2xlarge", + "name": "r5a_2xlarge" }, { - "value": "u-9tb1.112xlarge", - "name": "u_9tb1_112xlarge" + "value": "r5a.4xlarge", + "name": "r5a_4xlarge" }, { - "value": "u-12tb1.112xlarge", - "name": "u_12tb1_112xlarge" + "value": "r5a.8xlarge", + "name": "r5a_8xlarge" }, { - 
"value": "u-6tb1.metal", - "name": "u_6tb1_metal" + "value": "r5a.12xlarge", + "name": "r5a_12xlarge" }, { - "value": "u-9tb1.metal", - "name": "u_9tb1_metal" + "value": "r5a.16xlarge", + "name": "r5a_16xlarge" }, { - "value": "u-12tb1.metal", - "name": "u_12tb1_metal" + "value": "r5a.24xlarge", + "name": "r5a_24xlarge" }, { - "value": "u-18tb1.metal", - "name": "u_18tb1_metal" + "value": "r5ad.large", + "name": "r5ad_large" }, { - "value": "u-24tb1.metal", - "name": "u_24tb1_metal" + "value": "r5ad.xlarge", + "name": "r5ad_xlarge" }, { - "value": "a1.medium", - "name": "a1_medium" + "value": "r5ad.2xlarge", + "name": "r5ad_2xlarge" }, { - "value": "a1.large", - "name": "a1_large" + "value": "r5ad.4xlarge", + "name": "r5ad_4xlarge" }, { - "value": "a1.xlarge", - "name": "a1_xlarge" + "value": "r5ad.8xlarge", + "name": "r5ad_8xlarge" }, { - "value": "a1.2xlarge", - "name": "a1_2xlarge" + "value": "r5ad.12xlarge", + "name": "r5ad_12xlarge" }, { - "value": "a1.4xlarge", - "name": "a1_4xlarge" + "value": "r5ad.16xlarge", + "name": "r5ad_16xlarge" }, { - "value": "a1.metal", - "name": "a1_metal" + "value": "r5ad.24xlarge", + "name": "r5ad_24xlarge" }, { - "value": "m5dn.large", - "name": "m5dn_large" + "value": "r5b.large", + "name": "r5b_large" }, { - "value": "m5dn.xlarge", - "name": "m5dn_xlarge" + "value": "r5b.xlarge", + "name": "r5b_xlarge" }, { - "value": "m5dn.2xlarge", - "name": "m5dn_2xlarge" + "value": "r5b.2xlarge", + "name": "r5b_2xlarge" }, { - "value": "m5dn.4xlarge", - "name": "m5dn_4xlarge" + "value": "r5b.4xlarge", + "name": "r5b_4xlarge" }, { - "value": "m5dn.8xlarge", - "name": "m5dn_8xlarge" + "value": "r5b.8xlarge", + "name": "r5b_8xlarge" }, { - "value": "m5dn.12xlarge", - "name": "m5dn_12xlarge" + "value": "r5b.12xlarge", + "name": "r5b_12xlarge" }, { - "value": "m5dn.16xlarge", - "name": "m5dn_16xlarge" + "value": "r5b.16xlarge", + "name": "r5b_16xlarge" }, { - "value": "m5dn.24xlarge", - "name": "m5dn_24xlarge" + "value": "r5b.24xlarge", + "name": "r5b_24xlarge" }, { - "value": "m5dn.metal", - "name": "m5dn_metal" + "value": "r5b.metal", + "name": "r5b_metal" }, { - "value": "m5n.large", - "name": "m5n_large" + "value": "r5d.large", + "name": "r5d_large" }, { - "value": "m5n.xlarge", - "name": "m5n_xlarge" + "value": "r5d.xlarge", + "name": "r5d_xlarge" }, { - "value": "m5n.2xlarge", - "name": "m5n_2xlarge" + "value": "r5d.2xlarge", + "name": "r5d_2xlarge" }, { - "value": "m5n.4xlarge", - "name": "m5n_4xlarge" + "value": "r5d.4xlarge", + "name": "r5d_4xlarge" }, { - "value": "m5n.8xlarge", - "name": "m5n_8xlarge" + "value": "r5d.8xlarge", + "name": "r5d_8xlarge" }, { - "value": "m5n.12xlarge", - "name": "m5n_12xlarge" + "value": "r5d.12xlarge", + "name": "r5d_12xlarge" }, { - "value": "m5n.16xlarge", - "name": "m5n_16xlarge" + "value": "r5d.16xlarge", + "name": "r5d_16xlarge" }, { - "value": "m5n.24xlarge", - "name": "m5n_24xlarge" + "value": "r5d.24xlarge", + "name": "r5d_24xlarge" }, { - "value": "m5n.metal", - "name": "m5n_metal" + "value": "r5d.metal", + "name": "r5d_metal" }, { "value": "r5dn.large", @@ -45862,208 +45896,228 @@ "name": "r5n_metal" }, { - "value": "inf1.xlarge", - "name": "inf1_xlarge" + "value": "r6g.medium", + "name": "r6g_medium" }, { - "value": "inf1.2xlarge", - "name": "inf1_2xlarge" + "value": "r6g.large", + "name": "r6g_large" }, { - "value": "inf1.6xlarge", - "name": "inf1_6xlarge" + "value": "r6g.xlarge", + "name": "r6g_xlarge" }, { - "value": "inf1.24xlarge", - "name": "inf1_24xlarge" + "value": "r6g.2xlarge", + "name": "r6g_2xlarge" }, { 
- "value": "m6g.metal", - "name": "m6g_metal" + "value": "r6g.4xlarge", + "name": "r6g_4xlarge" }, { - "value": "m6g.medium", - "name": "m6g_medium" + "value": "r6g.8xlarge", + "name": "r6g_8xlarge" }, { - "value": "m6g.large", - "name": "m6g_large" + "value": "r6g.12xlarge", + "name": "r6g_12xlarge" }, { - "value": "m6g.xlarge", - "name": "m6g_xlarge" + "value": "r6g.16xlarge", + "name": "r6g_16xlarge" }, { - "value": "m6g.2xlarge", - "name": "m6g_2xlarge" + "value": "r6g.metal", + "name": "r6g_metal" }, { - "value": "m6g.4xlarge", - "name": "m6g_4xlarge" + "value": "r6gd.medium", + "name": "r6gd_medium" }, { - "value": "m6g.8xlarge", - "name": "m6g_8xlarge" + "value": "r6gd.large", + "name": "r6gd_large" }, { - "value": "m6g.12xlarge", - "name": "m6g_12xlarge" + "value": "r6gd.xlarge", + "name": "r6gd_xlarge" }, { - "value": "m6g.16xlarge", - "name": "m6g_16xlarge" + "value": "r6gd.2xlarge", + "name": "r6gd_2xlarge" }, { - "value": "m6gd.metal", - "name": "m6gd_metal" + "value": "r6gd.4xlarge", + "name": "r6gd_4xlarge" }, { - "value": "m6gd.medium", - "name": "m6gd_medium" + "value": "r6gd.8xlarge", + "name": "r6gd_8xlarge" }, { - "value": "m6gd.large", - "name": "m6gd_large" + "value": "r6gd.12xlarge", + "name": "r6gd_12xlarge" }, { - "value": "m6gd.xlarge", - "name": "m6gd_xlarge" + "value": "r6gd.16xlarge", + "name": "r6gd_16xlarge" }, { - "value": "m6gd.2xlarge", - "name": "m6gd_2xlarge" + "value": "r6gd.metal", + "name": "r6gd_metal" }, { - "value": "m6gd.4xlarge", - "name": "m6gd_4xlarge" + "value": "t1.micro", + "name": "t1_micro" }, { - "value": "m6gd.8xlarge", - "name": "m6gd_8xlarge" + "value": "t2.nano", + "name": "t2_nano" }, { - "value": "m6gd.12xlarge", - "name": "m6gd_12xlarge" + "value": "t2.micro", + "name": "t2_micro" }, { - "value": "m6gd.16xlarge", - "name": "m6gd_16xlarge" + "value": "t2.small", + "name": "t2_small" }, { - "value": "m6a.large", - "name": "m6a_large" + "value": "t2.medium", + "name": "t2_medium" }, { - "value": "m6a.xlarge", - "name": "m6a_xlarge" + "value": "t2.large", + "name": "t2_large" }, { - "value": "m6a.2xlarge", - "name": "m6a_2xlarge" + "value": "t2.xlarge", + "name": "t2_xlarge" }, { - "value": "m6a.4xlarge", - "name": "m6a_4xlarge" + "value": "t2.2xlarge", + "name": "t2_2xlarge" }, { - "value": "m6a.8xlarge", - "name": "m6a_8xlarge" + "value": "t3.nano", + "name": "t3_nano" }, { - "value": "m6a.12xlarge", - "name": "m6a_12xlarge" + "value": "t3.micro", + "name": "t3_micro" }, { - "value": "m6a.16xlarge", - "name": "m6a_16xlarge" + "value": "t3.small", + "name": "t3_small" }, { - "value": "m6a.24xlarge", - "name": "m6a_24xlarge" + "value": "t3.medium", + "name": "t3_medium" }, { - "value": "m6a.32xlarge", - "name": "m6a_32xlarge" + "value": "t3.large", + "name": "t3_large" }, { - "value": "m6a.48xlarge", - "name": "m6a_48xlarge" + "value": "t3.xlarge", + "name": "t3_xlarge" }, { - "value": "m6i.large", - "name": "m6i_large" + "value": "t3.2xlarge", + "name": "t3_2xlarge" }, { - "value": "m6i.xlarge", - "name": "m6i_xlarge" + "value": "t3a.nano", + "name": "t3a_nano" }, { - "value": "m6i.2xlarge", - "name": "m6i_2xlarge" + "value": "t3a.micro", + "name": "t3a_micro" }, { - "value": "m6i.4xlarge", - "name": "m6i_4xlarge" + "value": "t3a.small", + "name": "t3a_small" }, { - "value": "m6i.8xlarge", - "name": "m6i_8xlarge" + "value": "t3a.medium", + "name": "t3a_medium" }, { - "value": "m6i.12xlarge", - "name": "m6i_12xlarge" + "value": "t3a.large", + "name": "t3a_large" }, { - "value": "m6i.16xlarge", - "name": "m6i_16xlarge" + "value": 
"t3a.xlarge", + "name": "t3a_xlarge" }, { - "value": "m6i.24xlarge", - "name": "m6i_24xlarge" + "value": "t3a.2xlarge", + "name": "t3a_2xlarge" }, { - "value": "m6i.32xlarge", - "name": "m6i_32xlarge" + "value": "t4g.nano", + "name": "t4g_nano" }, { - "value": "mac1.metal", - "name": "mac1_metal" + "value": "t4g.micro", + "name": "t4g_micro" }, { - "value": "x2gd.medium", - "name": "x2gd_medium" + "value": "t4g.small", + "name": "t4g_small" }, { - "value": "x2gd.large", - "name": "x2gd_large" + "value": "t4g.medium", + "name": "t4g_medium" }, { - "value": "x2gd.xlarge", - "name": "x2gd_xlarge" + "value": "t4g.large", + "name": "t4g_large" }, { - "value": "x2gd.2xlarge", - "name": "x2gd_2xlarge" + "value": "t4g.xlarge", + "name": "t4g_xlarge" }, { - "value": "x2gd.4xlarge", - "name": "x2gd_4xlarge" + "value": "t4g.2xlarge", + "name": "t4g_2xlarge" }, { - "value": "x2gd.8xlarge", - "name": "x2gd_8xlarge" + "value": "u-6tb1.56xlarge", + "name": "u_6tb1_56xlarge" }, { - "value": "x2gd.12xlarge", - "name": "x2gd_12xlarge" + "value": "u-6tb1.112xlarge", + "name": "u_6tb1_112xlarge" }, { - "value": "x2gd.16xlarge", - "name": "x2gd_16xlarge" + "value": "u-9tb1.112xlarge", + "name": "u_9tb1_112xlarge" }, { - "value": "x2gd.metal", - "name": "x2gd_metal" + "value": "u-12tb1.112xlarge", + "name": "u_12tb1_112xlarge" + }, + { + "value": "u-6tb1.metal", + "name": "u_6tb1_metal" + }, + { + "value": "u-9tb1.metal", + "name": "u_9tb1_metal" + }, + { + "value": "u-12tb1.metal", + "name": "u_12tb1_metal" + }, + { + "value": "u-18tb1.metal", + "name": "u_18tb1_metal" + }, + { + "value": "u-24tb1.metal", + "name": "u_24tb1_metal" }, { "value": "vt1.3xlarge", @@ -46078,108 +46132,100 @@ "name": "vt1_24xlarge" }, { - "value": "im4gn.16xlarge", - "name": "im4gn_16xlarge" - }, - { - "value": "im4gn.2xlarge", - "name": "im4gn_2xlarge" - }, - { - "value": "im4gn.4xlarge", - "name": "im4gn_4xlarge" + "value": "x1.16xlarge", + "name": "x1_16xlarge" }, { - "value": "im4gn.8xlarge", - "name": "im4gn_8xlarge" + "value": "x1.32xlarge", + "name": "x1_32xlarge" }, { - "value": "im4gn.large", - "name": "im4gn_large" + "value": "x1e.xlarge", + "name": "x1e_xlarge" }, { - "value": "im4gn.xlarge", - "name": "im4gn_xlarge" + "value": "x1e.2xlarge", + "name": "x1e_2xlarge" }, { - "value": "is4gen.2xlarge", - "name": "is4gen_2xlarge" + "value": "x1e.4xlarge", + "name": "x1e_4xlarge" }, { - "value": "is4gen.4xlarge", - "name": "is4gen_4xlarge" + "value": "x1e.8xlarge", + "name": "x1e_8xlarge" }, { - "value": "is4gen.8xlarge", - "name": "is4gen_8xlarge" + "value": "x1e.16xlarge", + "name": "x1e_16xlarge" }, { - "value": "is4gen.large", - "name": "is4gen_large" + "value": "x1e.32xlarge", + "name": "x1e_32xlarge" }, { - "value": "is4gen.medium", - "name": "is4gen_medium" + "value": "x2gd.medium", + "name": "x2gd_medium" }, { - "value": "is4gen.xlarge", - "name": "is4gen_xlarge" + "value": "x2gd.large", + "name": "x2gd_large" }, { - "value": "g5g.xlarge", - "name": "g5g_xlarge" + "value": "x2gd.xlarge", + "name": "x2gd_xlarge" }, { - "value": "g5g.2xlarge", - "name": "g5g_2xlarge" + "value": "x2gd.2xlarge", + "name": "x2gd_2xlarge" }, { - "value": "g5g.4xlarge", - "name": "g5g_4xlarge" + "value": "x2gd.4xlarge", + "name": "x2gd_4xlarge" }, { - "value": "g5g.8xlarge", - "name": "g5g_8xlarge" + "value": "x2gd.8xlarge", + "name": "x2gd_8xlarge" }, { - "value": "g5g.16xlarge", - "name": "g5g_16xlarge" + "value": "x2gd.12xlarge", + "name": "x2gd_12xlarge" }, { - "value": "g5g.metal", - "name": "g5g_metal" + "value": "x2gd.16xlarge", + 
"name": "x2gd_16xlarge" }, { - "value": "g5.xlarge", - "name": "g5_xlarge" + "value": "x2gd.metal", + "name": "x2gd_metal" }, { - "value": "g5.2xlarge", - "name": "g5_2xlarge" + "value": "z1d.large", + "name": "z1d_large" }, { - "value": "g5.4xlarge", - "name": "g5_4xlarge" + "value": "z1d.xlarge", + "name": "z1d_xlarge" }, { - "value": "g5.8xlarge", - "name": "g5_8xlarge" + "value": "z1d.2xlarge", + "name": "z1d_2xlarge" }, { - "value": "g5.12xlarge", - "name": "g5_12xlarge" + "value": "z1d.3xlarge", + "name": "z1d_3xlarge" }, { - "value": "g5.16xlarge", - "name": "g5_16xlarge" + "value": "z1d.6xlarge", + "name": "z1d_6xlarge" }, { - "value": "g5.24xlarge", - "name": "g5_24xlarge" + "value": "z1d.12xlarge", + "name": "z1d_12xlarge" }, { - "value": "g5.48xlarge", - "name": "g5_48xlarge" + "value": "z1d.metal", + "name": "z1d_metal" } ] } @@ -48643,13 +48689,13 @@ "Add": { "target": "com.amazonaws.ec2#LaunchPermissionList", "traits": { - "smithy.api#documentation": "

              The Amazon Web Services account ID to add to the list of launch permissions for the AMI.

              " + "smithy.api#documentation": "

              The Amazon Web Services account ID, organization ARN, or OU ARN to add to the list of launch permissions for the AMI.

              " } }, "Remove": { "target": "com.amazonaws.ec2#LaunchPermissionList", "traits": { - "smithy.api#documentation": "

              The Amazon Web Services account ID to remove from the list of launch permissions for the AMI.

              " + "smithy.api#documentation": "

              The Amazon Web Services account ID, organization ARN, or OU ARN to remove from the list of launch permissions for the AMI.
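
The change above extends LaunchPermissionModifications so an AMI can be shared with an organization or organizational unit, not only with an account. A minimal sketch of how that could look through @aws-sdk/client-ec2, assuming the generated LaunchPermission shape exposes OrganizationArn and OrganizationalUnitArn members; all IDs and ARNs below are placeholders:

import { EC2Client, ModifyImageAttributeCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({}); // region/credentials resolved from the environment

async function shareAmiWithOrganization() {
  await ec2.send(
    new ModifyImageAttributeCommand({
      ImageId: "ami-0123456789abcdef0",
      Attribute: "launchPermission",
      LaunchPermission: {
        Add: [
          // Assumed member names for the new organization/OU sharing targets.
          { OrganizationArn: "arn:aws:organizations::123456789012:organization/o-example" },
          { OrganizationalUnitArn: "arn:aws:organizations::123456789012:ou/o-example/ou-example" },
        ],
        Remove: [{ UserId: "210987654321" }], // stop sharing with a single account
      },
    })
  );
}
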

              " } } }, @@ -49543,7 +49589,7 @@ "target": "com.amazonaws.ec2#LaunchTemplateInstanceMetadataEndpointState", "traits": { "aws.protocols#ec2QueryName": "HttpEndpoint", - "smithy.api#documentation": "

              This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

              \n \n

              If you specify a value of disabled, you will not be able to access your instance metadata.\n

              \n
              ", + "smithy.api#documentation": "

              Enables or disables the HTTP metadata endpoint on your instances. If the parameter is\n not specified, the default state is enabled.

              \n \n

              If you specify a value of disabled, you will not be able to access your instance metadata.\n

              \n
              ", "smithy.api#xmlName": "httpEndpoint" } }, @@ -49554,6 +49600,14 @@ "smithy.api#documentation": "

              Enables or disables the IPv6 endpoint for the instance metadata service.

              \n

              Default: disabled\n

              ", "smithy.api#xmlName": "httpProtocolIpv6" } + }, + "InstanceMetadataTags": { + "target": "com.amazonaws.ec2#LaunchTemplateInstanceMetadataTagsState", + "traits": { + "aws.protocols#ec2QueryName": "InstanceMetadataTags", + "smithy.api#documentation": "

              \n \n

              ", + "smithy.api#xmlName": "instanceMetadataTags" + } } }, "traits": { @@ -49578,7 +49632,7 @@ "HttpEndpoint": { "target": "com.amazonaws.ec2#LaunchTemplateInstanceMetadataEndpointState", "traits": { - "smithy.api#documentation": "

              This parameter enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled.

              \n \n

              If you specify a value of disabled, you will not be able to access your instance metadata.\n

              \n
              " + "smithy.api#documentation": "

              Enables or disables the HTTP metadata endpoint on your instances. If the parameter is not\n specified, the default state is enabled.

              \n \n

              If you specify a value of disabled, you will not be able to access your instance metadata.\n

              \n
              " } }, "HttpProtocolIpv6": { @@ -49586,6 +49640,12 @@ "traits": { "smithy.api#documentation": "

              Enables or disables the IPv6 endpoint for the instance metadata service.

              \n

              Default: disabled\n

              " } + }, + "InstanceMetadataTags": { + "target": "com.amazonaws.ec2#LaunchTemplateInstanceMetadataTagsState", + "traits": { + "smithy.api#documentation": "

              Set to enabled to allow access to instance tags from the instance\n metadata. Set to disabled to turn off access to instance tags from the instance\n metadata. For more information, see Work with\n instance tags using the instance metadata.

              \n

              Default: disabled\n
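
The new LaunchTemplateInstanceMetadataTagsState enum and the InstanceMetadataTags member let a launch template opt its instances into serving their tags through the instance metadata service. A minimal sketch with @aws-sdk/client-ec2, using a placeholder template name and the member names shown in the model above:

import { EC2Client, CreateLaunchTemplateCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({});

async function createTemplateWithMetadataTags() {
  await ec2.send(
    new CreateLaunchTemplateCommand({
      LaunchTemplateName: "metadata-tags-demo",
      LaunchTemplateData: {
        InstanceType: "t3.micro",
        MetadataOptions: {
          HttpEndpoint: "enabled",          // keep the IMDS endpoint reachable
          HttpProtocolIpv6: "disabled",     // defaults to disabled per the model
          InstanceMetadataTags: "enabled",  // new LaunchTemplateInstanceMetadataTagsState value
        },
      },
    })
  );
}
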

              " + } } }, "traits": { @@ -49622,6 +49682,21 @@ ] } }, + "com.amazonaws.ec2#LaunchTemplateInstanceMetadataTagsState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "disabled", + "name": "disabled" + }, + { + "value": "enabled", + "name": "enabled" + } + ] + } + }, "com.amazonaws.ec2#LaunchTemplateInstanceNetworkInterfaceSpecification": { "type": "structure", "members": { @@ -53082,7 +53157,7 @@ "HttpEndpoint": { "target": "com.amazonaws.ec2#InstanceMetadataEndpointState", "traits": { - "smithy.api#documentation": "

              Enables or disables the HTTP metadata endpoint on your instances. If\n the parameter is not specified, the existing state is maintained.

              \n

              If you specify a value of disabled, you cannot access your\n instance metadata.

              " + "smithy.api#documentation": "

              Enables or disables the HTTP metadata endpoint on your instances. If\n this parameter is not specified, the existing state is maintained.

              \n

              If you specify a value of disabled, you cannot access your\n instance metadata.

              " } }, "DryRun": { @@ -53096,6 +53171,12 @@ "traits": { "smithy.api#documentation": "

              Enables or disables the IPv6 endpoint for the instance metadata service. This setting \n applies only if you have enabled the HTTP metadata endpoint.

              " } + }, + "InstanceMetadataTags": { + "target": "com.amazonaws.ec2#InstanceMetadataTagsState", + "traits": { + "smithy.api#documentation": "

              Set to enabled to allow access to instance tags from the instance\n metadata. Set to disabled to turn off access to instance tags from the\n instance metadata. For more information, see Work with\n instance tags using the instance metadata.

              \n

              Default: disabled\n
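
For instances that are already running, the same setting is exposed on ModifyInstanceMetadataOptions. A minimal sketch, with a placeholder instance ID:

import { EC2Client, ModifyInstanceMetadataOptionsCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({});

async function enableMetadataTags(instanceId: string) {
  const result = await ec2.send(
    new ModifyInstanceMetadataOptionsCommand({
      InstanceId: instanceId,              // e.g. "i-0123456789abcdef0"
      HttpEndpoint: "enabled",             // leave the metadata endpoint on
      InstanceMetadataTags: "enabled",     // expose instance tags via IMDS; default is disabled
    })
  );
  console.log(result.InstanceMetadataOptions);
}
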

              " + } } } }, @@ -55040,6 +55121,56 @@ } } }, + "com.amazonaws.ec2#ModifyVpcEndpointServicePayerResponsibility": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#ModifyVpcEndpointServicePayerResponsibilityRequest" + }, + "output": { + "target": "com.amazonaws.ec2#ModifyVpcEndpointServicePayerResponsibilityResult" + }, + "traits": { + "smithy.api#documentation": "

              Modifies the payer responsibility for your VPC endpoint service.

              " + } + }, + "com.amazonaws.ec2#ModifyVpcEndpointServicePayerResponsibilityRequest": { + "type": "structure", + "members": { + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

              Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

              " + } + }, + "ServiceId": { + "target": "com.amazonaws.ec2#VpcEndpointServiceId", + "traits": { + "smithy.api#documentation": "

              The ID of the service.

              ", + "smithy.api#required": {} + } + }, + "PayerResponsibility": { + "target": "com.amazonaws.ec2#PayerResponsibility", + "traits": { + "smithy.api#documentation": "

              The entity that is responsible for the endpoint costs. The default is the endpoint owner.\n If you set the payer responsibility to the service owner, you cannot set it back to the\n endpoint owner.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.ec2#ModifyVpcEndpointServicePayerResponsibilityResult": { + "type": "structure", + "members": { + "ReturnValue": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Return", + "smithy.api#documentation": "

              Returns true if the request succeeds; otherwise, it returns an error.
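
Once this model is generated into @aws-sdk/client-ec2, the new operation should surface as ModifyVpcEndpointServicePayerResponsibilityCommand. A minimal sketch with a placeholder service ID; note that the model defines only the ServiceOwner value, and the documentation above says the setting cannot be moved back to the endpoint owner:

import {
  EC2Client,
  ModifyVpcEndpointServicePayerResponsibilityCommand,
} from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({});

async function makeServiceOwnerPay() {
  const { ReturnValue } = await ec2.send(
    new ModifyVpcEndpointServicePayerResponsibilityCommand({
      ServiceId: "vpce-svc-0123456789abcdef0",
      PayerResponsibility: "ServiceOwner", // only enum value defined by this model
    })
  );
  console.log("succeeded:", ReturnValue);
}
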

              ", + "smithy.api#xmlName": "return" + } + } + } + }, "com.amazonaws.ec2#ModifyVpcEndpointServicePermissions": { "type": "operation", "input": { @@ -55493,7 +55624,7 @@ "DPDTimeoutSeconds": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

              The number of seconds after which a DPD timeout occurs.

              \n

              Constraints: A value between 0 and 30.

              \n

              Default: 30\n

              " + "smithy.api#documentation": "

              The number of seconds after which a DPD timeout occurs.

              \n

              Constraints: A value greater than or equal to 30.

              \n

              Default: 30\n
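
The constraint on DPDTimeoutSeconds changes here from "between 0 and 30" to "greater than or equal to 30". A minimal sketch of setting it through ModifyVpnTunnelOptionsCommand in @aws-sdk/client-ec2, with placeholder connection and tunnel values:

import { EC2Client, ModifyVpnTunnelOptionsCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({});

async function raiseDpdTimeout() {
  await ec2.send(
    new ModifyVpnTunnelOptionsCommand({
      VpnConnectionId: "vpn-0123456789abcdef0",
      VpnTunnelOutsideIpAddress: "203.0.113.17",
      TunnelOptions: {
        DPDTimeoutSeconds: 45, // must be >= 30 under the revised constraint
      },
    })
  );
}
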

              " } }, "DPDTimeoutAction": { @@ -55726,19 +55857,22 @@ "Cidr": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

              The BYOIP CIDR.

              " + "smithy.api#documentation": "

              The BYOIP CIDR.

              ", + "smithy.api#required": {} } }, "IpamPoolId": { "target": "com.amazonaws.ec2#IpamPoolId", "traits": { - "smithy.api#documentation": "

              The IPAM pool ID.

              " + "smithy.api#documentation": "

              The IPAM pool ID.

              ", + "smithy.api#required": {} } }, "IpamPoolOwner": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "

              The Amazon Web Services account ID of the owner of the IPAM pool.

              " + "smithy.api#documentation": "

              The Amazon Web Services account ID of the owner of the IPAM pool.
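
With this change, Cidr, IpamPoolId, and IpamPoolOwner all become required on the MoveByoipCidrToIpam request. A minimal sketch with placeholder values, assuming the operation is exposed as MoveByoipCidrToIpamCommand in @aws-sdk/client-ec2:

import { EC2Client, MoveByoipCidrToIpamCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({});

async function moveCidrToIpam() {
  await ec2.send(
    new MoveByoipCidrToIpamCommand({
      // All three members are required after this model change.
      Cidr: "198.51.100.0/24",
      IpamPoolId: "ipam-pool-0123456789abcdef0",
      IpamPoolOwner: "123456789012",
    })
  );
}
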

              ", + "smithy.api#required": {} } } } @@ -58163,6 +58297,17 @@ "smithy.api#documentation": "

              Describes a path statement.

              " } }, + "com.amazonaws.ec2#PayerResponsibility": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ServiceOwner", + "name": "ServiceOwner" + } + ] + } + }, "com.amazonaws.ec2#PaymentOption": { "type": "string", "traits": { @@ -58749,12 +58894,26 @@ "smithy.api#documentation": "

              Any tags applied to the placement group.

              ", "smithy.api#xmlName": "tagSet" } + }, + "GroupArn": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "GroupArn", + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the placement group.

              ", + "smithy.api#xmlName": "groupArn" + } } }, "traits": { "smithy.api#documentation": "

              Describes a placement group.
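
The PlacementGroup structure now carries a GroupArn member alongside the name and ID. A minimal sketch of reading it from DescribePlacementGroups, with a placeholder group name:

import { EC2Client, DescribePlacementGroupsCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({});

async function printPlacementGroupArns() {
  const { PlacementGroups = [] } = await ec2.send(
    new DescribePlacementGroupsCommand({ GroupNames: ["my-cluster-pg"] })
  );
  for (const pg of PlacementGroups) {
    // GroupArn is the newly exposed ARN field on PlacementGroup.
    console.log(pg.GroupName, pg.GroupArn);
  }
}
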

              " } }, + "com.amazonaws.ec2#PlacementGroupArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws([a-z-]+)?:ec2:[a-z\\d-]+:\\d{12}:placement-group/([^\\s].+[^\\s]){1,255}$" + } + }, "com.amazonaws.ec2#PlacementGroupId": { "type": "string" }, @@ -60189,7 +60348,7 @@ } }, "HostReservationId": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#HostReservationId", "traits": { "aws.protocols#ec2QueryName": "HostReservationId", "smithy.api#documentation": "

              The ID of the reservation.

              ", @@ -67612,6 +67771,14 @@ "smithy.api#xmlName": "privateDnsNameConfiguration" } }, + "PayerResponsibility": { + "target": "com.amazonaws.ec2#PayerResponsibility", + "traits": { + "aws.protocols#ec2QueryName": "PayerResponsibility", + "smithy.api#documentation": "

              The payer responsibility.

              ", + "smithy.api#xmlName": "payerResponsibility" + } + }, "Tags": { "target": "com.amazonaws.ec2#TagList", "traits": { @@ -67725,6 +67892,14 @@ "smithy.api#xmlName": "managesVpcEndpoints" } }, + "PayerResponsibility": { + "target": "com.amazonaws.ec2#PayerResponsibility", + "traits": { + "aws.protocols#ec2QueryName": "PayerResponsibility", + "smithy.api#documentation": "

              The payer responsibility.

              ", + "smithy.api#xmlName": "payerResponsibility" + } + }, "Tags": { "target": "com.amazonaws.ec2#TagList", "traits": { @@ -68619,13 +68794,13 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "TerminationDelay", - "smithy.api#documentation": "

              The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot\n Instance after launching a new replacement Spot Instance.

              \n

              Valid only when ReplacementStrategy is set to launch-before-terminate.

              \n

              Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds.

              ", + "smithy.api#documentation": "

              The amount of time (in seconds) that Amazon EC2 waits before terminating the old Spot\n Instance after launching a new replacement Spot Instance.

              \n

              Required when ReplacementStrategy is set to launch-before-terminate.

              \n

              Not valid when ReplacementStrategy is set to launch.

              \n

              Valid values: Minimum value of 120 seconds. Maximum value of 7200 seconds.

              ", "smithy.api#xmlName": "terminationDelay" } } }, "traits": { - "smithy.api#documentation": "

              The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your\n Spot Instance is at an elevated risk of being interrupted. For more information, see\n Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

              " + "smithy.api#documentation": "

              The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your\n Spot Instance is at an elevated risk of being interrupted. For more information, see\n Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.
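
TerminationDelay is now documented as required for the launch-before-terminate replacement strategy and not valid for launch. A minimal sketch of a Spot Fleet request that uses it, with placeholder role and launch template identifiers and assuming the SpotMaintenanceStrategies member names shown in the model:

import { EC2Client, RequestSpotFleetCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({});

async function requestRebalancingFleet() {
  await ec2.send(
    new RequestSpotFleetCommand({
      SpotFleetRequestConfig: {
        IamFleetRole: "arn:aws:iam::123456789012:role/aws-ec2-spot-fleet-tagging-role",
        TargetCapacity: 2,
        LaunchTemplateConfigs: [
          {
            LaunchTemplateSpecification: {
              LaunchTemplateId: "lt-0123456789abcdef0",
              Version: "$Latest",
            },
          },
        ],
        SpotMaintenanceStrategies: {
          CapacityRebalance: {
            ReplacementStrategy: "launch-before-terminate",
            TerminationDelay: 120, // seconds; required for launch-before-terminate, 120-7200
          },
        },
      },
    })
  );
}
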

              " } }, "com.amazonaws.ec2#SpotDatafeedSubscription": { @@ -69484,7 +69659,7 @@ "target": "com.amazonaws.ec2#SpotCapacityRebalance", "traits": { "aws.protocols#ec2QueryName": "CapacityRebalance", - "smithy.api#documentation": "

              The strategy to use when Amazon EC2 emits a signal that your Spot Instance is at an\n elevated risk of being interrupted.

              ", + "smithy.api#documentation": "

              The Spot Instance replacement strategy to use when Amazon EC2 emits a signal that your\n Spot Instance is at an elevated risk of being interrupted. For more information, see\n Capacity rebalancing in the Amazon EC2 User Guide for Linux Instances.

              ", "smithy.api#xmlName": "capacityRebalance" } } @@ -78061,7 +78236,7 @@ "DPDTimeoutSeconds": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "

              The number of seconds after which a DPD timeout occurs.

              \n

              Constraints: A value between 0 and 30.

              \n

              Default: 30\n

              " + "smithy.api#documentation": "

              The number of seconds after which a DPD timeout occurs.

              \n

              Constraints: A value greater than or equal to 30.

              \n

              Default: 30\n

              " } }, "DPDTimeoutAction": { diff --git a/codegen/sdk-codegen/aws-models/ecs.json b/codegen/sdk-codegen/aws-models/ecs.json index c023b85de0b7..ed4ac8ab6c8f 100644 --- a/codegen/sdk-codegen/aws-models/ecs.json +++ b/codegen/sdk-codegen/aws-models/ecs.json @@ -285,7 +285,7 @@ "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "

              The status of the attachment. Valid values are PRECREATED,\n\t\t\t\tCREATED, ATTACHING, ATTACHED,\n\t\t\t\tDETACHING, DETACHED, and DELETED.

              " + "smithy.api#documentation": "

              The status of the attachment. Valid values are PRECREATED,\n\t\t\t\tCREATED, ATTACHING, ATTACHED,\n\t\t\t\tDETACHING, DETACHED, DELETED, and\n\t\t\t\tFAILED.

              " } }, "details": { @@ -409,7 +409,7 @@ "managedTerminationProtection": { "target": "com.amazonaws.ecs#ManagedTerminationProtection", "traits": { - "smithy.api#documentation": "

              The managed termination protection setting to use for the Auto Scaling group capacity\n\t\t\tprovider. This determines whether the Auto Scaling group has managed termination\n\t\t\tprotection.

              \n\t\t \n\t\t\t

              When using managed termination protection, managed scaling must also be used\n\t\t\t\totherwise managed termination protection doesn't work.

              \n\t\t
              \n\t\t

              When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in\n\t\t\tan Auto Scaling group that contain tasks from being terminated during a scale-in action.\n\t\t\tThe Auto Scaling group and each instance in the Auto Scaling group must have instance\n\t\t\tprotection from scale-in actions enabled as well. For more information, see Instance Protection in the Auto Scaling User Guide.

              \n\t\t

              When managed termination protection is disabled, your Amazon EC2 instances aren't protected\n\t\t\tfrom termination when the Auto Scaling group scales in.

              " + "smithy.api#documentation": "

              The managed termination protection setting to use for the Auto Scaling group capacity\n\t\t\tprovider. This determines whether the Auto Scaling group has managed termination\n\t\t\tprotection. The default is disabled.

              \n\t\t \n\t\t\t

              When using managed termination protection, managed scaling must also be used;\n\t\t\t\totherwise, managed termination protection doesn't work.

              \n\t\t
              \n\t\t

              When managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in\n\t\t\tan Auto Scaling group that contain tasks from being terminated during a scale-in action.\n\t\t\tThe Auto Scaling group and each instance in the Auto Scaling group must have instance\n\t\t\tprotection from scale-in actions enabled as well. For more information, see Instance Protection in the Auto Scaling User Guide.

              \n\t\t

              When managed termination protection is disabled, your Amazon EC2 instances aren't protected\n\t\t\tfrom termination when the Auto Scaling group scales in.
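
The note that managed termination protection defaults to disabled and only works together with managed scaling maps directly onto a capacity provider definition. A minimal sketch with @aws-sdk/client-ecs, using a placeholder Auto Scaling group ARN:

import { ECSClient, CreateCapacityProviderCommand } from "@aws-sdk/client-ecs";

const ecs = new ECSClient({});

async function createProtectedCapacityProvider() {
  await ecs.send(
    new CreateCapacityProviderCommand({
      name: "asg-capacity-provider",
      autoScalingGroupProvider: {
        autoScalingGroupArn:
          "arn:aws:autoscaling:us-east-1:123456789012:autoScalingGroup:uuid:autoScalingGroupName/my-asg",
        // Managed termination protection only works together with managed scaling.
        managedScaling: { status: "ENABLED", targetCapacity: 100 },
        managedTerminationProtection: "ENABLED", // defaults to DISABLED when omitted
      },
    })
  );
}
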

              " } } }, @@ -6561,7 +6561,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Starts a new task using the specified task definition.

              \n\t\t

              You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

              \n\t\t

              Alternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.

              \n\t\t

              The Amazon ECS API follows an eventual consistency model. This is because the distributed\n\t\t\tnature of the system supporting the API. This means that the result of an API command\n\t\t\tyou run that affects your Amazon ECS resources might not be immediately visible to all\n\t\t\tsubsequent commands you run. Keep this in mind when you carry out an API command that\n\t\t\timmediately follows a previous API command.

              \n\t\t

              To manage eventual consistency, you can do the following:

              \n\t\t
                \n
              • \n\t\t\t\t

                Confirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.

                \n\t\t\t
              • \n
              • \n\t\t\t\t

                Add wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.

                \n\t\t\t
              • \n
              " + "smithy.api#documentation": "

              Starts a new task using the specified task definition.

              \n\t\t

              You can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.

              \n\t\t

              Alternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.

              \n\t\t

              The Amazon ECS API follows an eventual consistency model. This is because of the distributed\n\t\t\tnature of the system supporting the API. This means that the result of an API command\n\t\t\tyou run that affects your Amazon ECS resources might not be immediately visible to all\n\t\t\tsubsequent commands you run. Keep this in mind when you carry out an API command that\n\t\t\timmediately follows a previous API command.

              \n\t\t

              To manage eventual consistency, you can do the following:

              \n\t\t
                \n
              • \n\t\t\t\t

                Confirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.

                \n\t\t\t
              • \n
              • \n\t\t\t\t

                Add wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.

                \n\t\t\t
              • \n
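
The eventual-consistency guidance above (exponential backoff starting at a couple of seconds and increasing toward about five minutes) translates into a simple polling loop around DescribeTasks. A minimal sketch with @aws-sdk/client-ecs, assuming the cluster and task definition already exist:

import { ECSClient, RunTaskCommand, DescribeTasksCommand } from "@aws-sdk/client-ecs";

const ecs = new ECSClient({});
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));

async function runAndWait(cluster: string, taskDefinition: string) {
  const run = await ecs.send(new RunTaskCommand({ cluster, taskDefinition }));
  const taskArn = run.tasks?.[0]?.taskArn;
  if (!taskArn) throw new Error(run.failures?.[0]?.reason ?? "RunTask returned no task");

  // Back off exponentially from a couple of seconds toward the five-minute ceiling
  // suggested above, re-describing the task until it becomes visible and RUNNING.
  for (let delay = 2_000; delay <= 300_000; delay *= 2) {
    const { tasks = [] } = await ecs.send(
      new DescribeTasksCommand({ cluster, tasks: [taskArn] })
    );
    if (tasks[0]?.lastStatus === "RUNNING") return tasks[0];
    await sleep(delay);
  }
  throw new Error("task did not reach RUNNING in time");
}
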
              " } }, "com.amazonaws.ecs#RunTaskRequest": { @@ -9097,7 +9097,7 @@ "containerInstances": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "

              A list of container instance IDs or full ARN entries.

              ", + "smithy.api#documentation": "

              A list of up to 10 container instance IDs or full ARN entries.

              ", "smithy.api#required": {} } }, diff --git a/codegen/sdk-codegen/aws-models/eks.json b/codegen/sdk-codegen/aws-models/eks.json index 658c62bdeae2..dcac720e44e3 100644 --- a/codegen/sdk-codegen/aws-models/eks.json +++ b/codegen/sdk-codegen/aws-models/eks.json @@ -74,7 +74,7 @@ "name": "eks" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

              Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy for you to run Kubernetes on\n Amazon Web Services without needing to stand up or maintain your own Kubernetes control plane.\n Kubernetes is an open-source system for automating the deployment, scaling, and\n management of containerized applications.

              \n

              Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so you can use\n all the existing plugins and tooling from the Kubernetes community. Applications running\n on Amazon EKS are fully compatible with applications running on any standard Kubernetes\n environment, whether running in on-premises data centers or public clouds. This means\n that you can easily migrate any standard Kubernetes application to Amazon EKS without any\n code modification required.

              ", + "smithy.api#documentation": "

              Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy\n for you to run Kubernetes on Amazon Web Services without needing to stand up or maintain\n your own Kubernetes control plane. Kubernetes is an open-source system for automating\n the deployment, scaling, and management of containerized applications.

              \n

              Amazon EKS runs up-to-date versions of the open-source Kubernetes software, so\n you can use all the existing plugins and tooling from the Kubernetes community.\n Applications running on Amazon EKS are fully compatible with applications\n running on any standard Kubernetes environment, whether running in on-premises data\n centers or public clouds. This means that you can easily migrate any standard Kubernetes\n application to Amazon EKS without any code modification required.

              ", "smithy.api#title": "Amazon Elastic Kubernetes Service" }, "version": "2017-11-01", @@ -191,7 +191,7 @@ } }, "traits": { - "smithy.api#documentation": "

              You don't have permissions to perform the requested operation. The user or role that\n is making the request must have at least one IAM permissions policy attached that\n grants the required permissions. For more information, see Access\n Management in the IAM User Guide.

              ", + "smithy.api#documentation": "

              You don't have permissions to perform the requested operation. The user or role that\n is making the request must have at least one IAM permissions policy\n attached that grants the required permissions. For more information, see Access\n Management in the IAM User Guide.

              ", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -250,7 +250,7 @@ "serviceAccountRoleArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the IAM role that is bound to the Kubernetes service account used\n by the add-on.

              " + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the IAM role that is bound to the Kubernetes service\n account used by the add-on.

              " } }, "tags": { @@ -610,12 +610,12 @@ "name": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The name of the Auto Scaling group associated with an Amazon EKS managed node group.

              " + "smithy.api#documentation": "

              The name of the Auto Scaling group associated with an Amazon EKS managed node\n group.

              " } } }, "traits": { - "smithy.api#documentation": "

              An Auto Scaling group that is associated with an Amazon EKS managed node group.

              " + "smithy.api#documentation": "

              An Auto Scaling group that is associated with an Amazon EKS managed node\n group.

              " } }, "com.amazonaws.eks#AutoScalingGroupList": { @@ -754,13 +754,13 @@ "roleArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control\n plane to make calls to Amazon Web Services API operations on your behalf.

              " + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the IAM role that provides permissions for the\n Kubernetes control plane to make calls to Amazon Web Services API operations on your\n behalf.

              " } }, "resourcesVpcConfig": { "target": "com.amazonaws.eks#VpcConfigResponse", "traits": { - "smithy.api#documentation": "

              The VPC configuration used by the cluster control plane. Amazon EKS VPC resources have\n specific requirements to work properly with Kubernetes. For more information, see Cluster VPC\n Considerations and Cluster Security Group Considerations in the\n Amazon EKS User Guide.

              " + "smithy.api#documentation": "

              The VPC configuration used by the cluster control plane. Amazon EKS VPC\n resources have specific requirements to work properly with Kubernetes. For more\n information, see Cluster VPC Considerations and Cluster Security\n Group Considerations in the Amazon EKS User Guide.

              " } }, "kubernetesNetworkConfig": { @@ -802,7 +802,7 @@ "platformVersion": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The platform version of your Amazon EKS cluster. For more information, see Platform\n Versions in the \n Amazon EKS User Guide\n .

              " + "smithy.api#documentation": "

              The platform version of your Amazon EKS cluster. For more information, see\n Platform Versions in the \n Amazon EKS User Guide\n .

              " } }, "tags": { @@ -950,7 +950,7 @@ "roleArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the role that is authorized to request the connector configuration.

              ", + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the role that is authorized to request the connector\n configuration.

              ", "smithy.api#required": {} } }, @@ -984,7 +984,7 @@ "activationExpiry": { "target": "com.amazonaws.eks#Timestamp", "traits": { - "smithy.api#documentation": "

              The expiration time of the connected cluster. The cluster's YAML file must be applied through the native \n provider.

              " + "smithy.api#documentation": "

              The expiration time of the connected cluster. The cluster's YAML file must be applied\n through the native provider.

              " } }, "provider": { @@ -996,7 +996,7 @@ "roleArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the role to communicate with services from the connected Kubernetes cluster.

              " + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the role to communicate with services from the connected Kubernetes\n cluster.

              " } } }, @@ -1074,7 +1074,7 @@ "resolveConflicts": { "target": "com.amazonaws.eks#ResolveConflicts", "traits": { - "smithy.api#documentation": "

              How to resolve parameter value conflicts when migrating an existing add-on to an Amazon EKS\n add-on.

              " + "smithy.api#documentation": "

              How to resolve parameter value conflicts when migrating an existing add-on to an\n Amazon EKS add-on.

              " } }, "clientRequestToken": { @@ -1132,7 +1132,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Creates an Amazon EKS control plane.

              \n

              The Amazon EKS control plane consists of control plane instances that run the Kubernetes\n software, such as etcd and the API server. The control plane runs in an\n account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server\n endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its\n own set of Amazon EC2 instances.

              \n

              The cluster control plane is provisioned across multiple Availability Zones and\n fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC\n subnets to provide connectivity from the control plane instances to the nodes (for\n example, to support kubectl exec, logs, and proxy\n data flows).

              \n

              Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over\n the Kubernetes API server endpoint and a certificate file that is created for your\n cluster.

              \n \n

              In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster,\n you must configure your Kubernetes tooling to communicate with the API server and launch\n nodes into your cluster. For more information, see Managing Cluster\n Authentication and Launching Amazon EKS nodes in the Amazon EKS User Guide.

              ", + "smithy.api#documentation": "

              Creates an Amazon EKS control plane.

              \n

              The Amazon EKS control plane consists of control plane instances that run the Kubernetes\n software, such as etcd and the API server. The control plane runs in an\n account managed by Amazon Web Services, and the Kubernetes API is exposed by the Amazon EKS API server\n endpoint. Each Amazon EKS cluster control plane is single tenant and unique. It runs on its\n own set of Amazon EC2 instances.

              \n

              The cluster control plane is provisioned across multiple Availability Zones and\n fronted by an Elastic Load Balancing Network Load Balancer. Amazon EKS also provisions elastic network interfaces in your VPC\n subnets to provide connectivity from the control plane instances to the nodes (for\n example, to support kubectl exec, logs, and proxy\n data flows).

              \n

              Amazon EKS nodes run in your Amazon Web Services account and connect to your cluster's control plane over\n the Kubernetes API server endpoint and a certificate file that is created for your\n cluster.

              \n \n

              In most cases, it takes several minutes to create a cluster. After you create an Amazon EKS cluster,\n you must configure your Kubernetes tooling to communicate with the API server and launch\n nodes into your cluster. For more information, see Managing Cluster\n Authentication and Launching Amazon EKS nodes in the\n Amazon EKS User Guide.

              ", "smithy.api#http": { "method": "POST", "uri": "/clusters", @@ -1159,7 +1159,7 @@ "roleArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control\n plane to make calls to Amazon Web Services API operations on your behalf. For more information, see\n Amazon EKS\n Service IAM Role in the \n Amazon EKS User Guide\n .

              ", + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the IAM role that provides permissions for the\n Kubernetes control plane to make calls to Amazon Web Services API operations on your\n behalf. For more information, see Amazon EKS Service IAM Role in the \n Amazon EKS User Guide\n .

              ", "smithy.api#required": {} } }, @@ -1179,7 +1179,7 @@ "logging": { "target": "com.amazonaws.eks#Logging", "traits": { - "smithy.api#documentation": "

              Enable or disable exporting the Kubernetes control plane logs for your cluster to\n CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more\n information, see Amazon EKS Cluster control plane logs in the\n \n Amazon EKS User Guide\n .

              \n \n

              CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported\n control plane logs. For more information, see CloudWatch Pricing.

              \n
              " + "smithy.api#documentation": "

              Enable or disable exporting the Kubernetes control plane logs for your cluster to\n CloudWatch Logs. By default, cluster control plane logs aren't exported to\n CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the\n \n Amazon EKS User Guide\n .

              \n \n

              CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
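
A minimal sketch of creating a cluster with control plane logging enabled through @aws-sdk/client-eks, using placeholder role and subnet identifiers; enabling log types incurs the CloudWatch Logs charges mentioned above:

import { EKSClient, CreateClusterCommand } from "@aws-sdk/client-eks";

const eks = new EKSClient({});

async function createCluster() {
  await eks.send(
    new CreateClusterCommand({
      name: "demo-cluster",
      roleArn: "arn:aws:iam::123456789012:role/eksClusterRole",
      resourcesVpcConfig: {
        subnetIds: ["subnet-0a1b2c3d4e5f67890", "subnet-0f9e8d7c6b5a43210"],
      },
      // Control plane logs are off by default; turn on only the types you need.
      logging: {
        clusterLogging: [{ types: ["api", "audit"], enabled: true }],
      },
    })
  );
}
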

              \n
              " } }, "clientRequestToken": { @@ -1243,7 +1243,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Creates an Fargate profile for your Amazon EKS cluster. You must have at least one Fargate\n profile in a cluster to be able to run pods on Fargate.

              \n

              The Fargate profile allows an administrator to declare which pods run on Fargate and specify\n which pods run on which Fargate profile. This declaration is done through the profile’s\n selectors. Each profile can have up to five selectors that contain a namespace and\n labels. A namespace is required for every selector. The label field consists of multiple\n optional key-value pairs. Pods that match the selectors are scheduled on Fargate. If a\n to-be-scheduled pod matches any of the selectors in the Fargate profile, then that pod is\n run on Fargate.

              \n

              When you create a Fargate profile, you must specify a pod execution role to use with the\n pods that are scheduled with the profile. This role is added to the cluster's Kubernetes\n Role Based Access\n Control (RBAC) for authorization so that the kubelet that is\n running on the Fargate infrastructure can register with your Amazon EKS cluster so that it can\n appear in your cluster as a node. The pod execution role also provides IAM permissions\n to the Fargate infrastructure to allow read access to Amazon ECR image repositories. For more\n information, see Pod Execution Role in the Amazon EKS User Guide.

              \n

              Fargate profiles are immutable. However, you can create a new updated profile to replace\n an existing profile and then delete the original after the updated profile has finished\n creating.

              \n

              If any Fargate profiles in a cluster are in the DELETING status, you must\n wait for that Fargate profile to finish deleting before you can create any other profiles\n in that cluster.

              \n

              For more information, see Fargate Profile in the Amazon EKS User Guide.

              ", + "smithy.api#documentation": "

              Creates an Fargate profile for your Amazon EKS cluster. You\n must have at least one Fargate profile in a cluster to be able to run\n pods on Fargate.

              \n

The Fargate profile allows an administrator to declare which pods run\n on Fargate and specify which pods run on which Fargate\n profile. This declaration is done through the profile’s selectors. Each profile can have\n up to five selectors that contain a namespace and labels. A namespace is required for\n every selector. The label field consists of multiple optional key-value pairs. Pods that\n match the selectors are scheduled on Fargate. If a to-be-scheduled pod\n matches any of the selectors in the Fargate profile, then that pod is run\n on Fargate.\n When you create a Fargate profile, you must specify a pod execution\n role to use with the pods that are scheduled with the profile. This role is added to the\n cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the\n kubelet that is running on the Fargate infrastructure\n can register with your Amazon EKS cluster so that it can appear in your cluster\n as a node. The pod execution role also provides IAM permissions to the\n Fargate infrastructure to allow read access to Amazon ECR\n image repositories. For more information, see Pod\n Execution Role in the Amazon EKS User Guide.\n Fargate profiles are immutable. However, you can create a new updated\n profile to replace an existing profile and then delete the original after the updated\n profile has finished creating.\n If any Fargate profiles in a cluster are in the DELETING\n status, you must wait for that Fargate profile to finish deleting before\n you can create any other profiles in that cluster.\n For more information, see Fargate Profile in the\n Amazon EKS User Guide.",
        "smithy.api#http": {
          "method": "POST",
          "uri": "/clusters/{clusterName}/fargate-profiles",
@@ -1264,7 +1264,7 @@
      "clusterName": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The name of the Amazon EKS cluster to apply the Fargate profile to.",
+          "smithy.api#documentation": "The name of the Amazon EKS cluster to apply the Fargate profile\n to.",
          "smithy.api#httpLabel": {},
          "smithy.api#required": {}
        }
@@ -1272,20 +1272,20 @@
      "podExecutionRoleArn": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the pod execution role to use for pods that match the selectors in\n the Fargate profile. The pod execution role allows Fargate infrastructure to register with\n your cluster as a node, and it provides read access to Amazon ECR image repositories. For\n more information, see Pod Execution Role in the\n Amazon EKS User Guide.",
+          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the pod execution role to use for pods that match the selectors in\n the Fargate profile. The pod execution role allows Fargate\n infrastructure to register with your cluster as a node, and it provides read access to\n Amazon ECR image repositories. For more information, see Pod\n Execution Role in the Amazon EKS User Guide.",
          "smithy.api#required": {}
        }
      },
      "subnets": {
        "target": "com.amazonaws.eks#StringList",
        "traits": {
-          "smithy.api#documentation": "The IDs of subnets to launch your pods into. At this time, pods running on Fargate are\n not assigned public IP addresses, so only private subnets (with no direct route to an\n Internet Gateway) are accepted for this parameter."
+          "smithy.api#documentation": "The IDs of subnets to launch your pods into. At this time, pods running on Fargate are not assigned public IP addresses, so only private subnets (with\n no direct route to an Internet Gateway) are accepted for this parameter."
        }
      },
      "selectors": {
        "target": "com.amazonaws.eks#FargateProfileSelectors",
        "traits": {
-          "smithy.api#documentation": "The selectors to match for pods to use this Fargate profile. Each selector must have an\n associated namespace. Optionally, you can also specify labels for a namespace. You may\n specify up to five selectors in a Fargate profile."
+          "smithy.api#documentation": "The selectors to match for pods to use this Fargate profile. Each\n selector must have an associated namespace. Optionally, you can also specify labels for\n a namespace. You may specify up to five selectors in a Fargate\n profile."
        }
      },
      "clientRequestToken": {

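A minimal usage sketch of the CreateFargateProfile operation modeled above, assuming the standard `@aws-sdk/client-eks` v3 command naming; the region, cluster name, role ARN, subnet IDs, and selector values are placeholders.

```ts
import { EKSClient, CreateFargateProfileCommand } from "@aws-sdk/client-eks";

// Schedule every pod in the "orders" namespace with label env=prod onto Fargate.
export async function createOrdersFargateProfile(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  const { fargateProfile } = await client.send(
    new CreateFargateProfileCommand({
      clusterName: "my-cluster", // placeholder
      fargateProfileName: "orders-prod",
      podExecutionRoleArn: "arn:aws:iam::111122223333:role/eks-fargate-pod-execution-role", // placeholder
      subnets: ["subnet-0aaa1111", "subnet-0bbb2222"], // private subnets only
      selectors: [{ namespace: "orders", labels: { env: "prod" } }], // up to five selectors
    })
  );

  console.log(fargateProfile?.status); // e.g. "CREATING"
}
```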
@@ -1346,7 +1346,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Creates a managed node group for an Amazon EKS cluster. You can only create a node group\n for your cluster that is equal to the current Kubernetes version for the cluster. All\n node groups are created with the latest AMI release version for the respective minor\n Kubernetes version of the cluster, unless you deploy a custom AMI using a launch\n template. For more information about using launch templates, see Launch\n template support.\n An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and associated Amazon EC2 instances that\n are managed by Amazon Web Services for an Amazon EKS cluster. Each node group uses a version of the Amazon EKS\n optimized Amazon Linux 2 AMI. For more information, see Managed\n Node Groups in the Amazon EKS User Guide.",
+      "smithy.api#documentation": "Creates a managed node group for an Amazon EKS cluster. You can only create a\n node group for your cluster that is equal to the current Kubernetes version for the\n cluster. All node groups are created with the latest AMI release version for the\n respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using\n a launch template. For more information about using launch templates, see Launch\n template support.\n An Amazon EKS managed node group is an Amazon EC2\n Auto Scaling group and associated Amazon EC2 instances that are managed by\n Amazon Web Services for an Amazon EKS cluster. Each node group uses a version\n of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see Managed\n Node Groups in the Amazon EKS User Guide.",
      "smithy.api#http": {
        "method": "POST",
        "uri": "/clusters/{clusterName}/node-groups",
@@ -1400,7 +1400,7 @@
      "amiType": {
        "target": "com.amazonaws.eks#AMITypes",
        "traits": {
-          "smithy.api#documentation": "The AMI type for your node group. GPU instance types should use the\n AL2_x86_64_GPU AMI type. Non-GPU instances should use the\n AL2_x86_64 AMI type. Arm instances should use the\n AL2_ARM_64 AMI type. All types use the Amazon EKS optimized Amazon Linux 2 AMI.\n If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify amiType,\n or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide."
+          "smithy.api#documentation": "The AMI type for your node group. GPU instance types should use the\n AL2_x86_64_GPU AMI type. Non-GPU instances should use the\n AL2_x86_64 AMI type. Arm instances should use the\n AL2_ARM_64 AMI type. All types use the Amazon EKS optimized\n Amazon Linux 2 AMI. If you specify launchTemplate, and your launch template uses a custom AMI,\n then don't specify amiType, or the node group deployment\n will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide."
        }
      },
      "remoteAccess": {
@@ -1412,7 +1412,7 @@
      "nodeRole": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The Amazon EKS worker\n node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive\n permissions for these API calls through an IAM instance profile and associated\n policies. Before you can launch nodes and register them into a cluster, you must create\n an IAM role for those nodes to use when they are launched. For more information, see\n Amazon EKS node IAM role in the \n Amazon EKS User Guide\n .\n If you specify launchTemplate, then don't specify \n IamInstanceProfile\n in your launch template,\n or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.",
+          "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role to associate with your node group. The\n Amazon EKS worker node kubelet daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API calls\n through an IAM instance profile and associated policies. Before you can\n launch nodes and register them into a cluster, you must create an IAM\n role for those nodes to use when they are launched. For more information, see Amazon EKS node IAM role in the\n \n Amazon EKS User Guide\n . If you specify launchTemplate, then don't specify \n \n IamInstanceProfile\n in your launch template,\n or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.",
          "smithy.api#required": {}
        }
      },
@@ -1468,7 +1468,7 @@
      "releaseVersion": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The AMI version of the Amazon EKS optimized AMI to use with your node group. By default,\n the latest available AMI version for the node group's current Kubernetes version is\n used. For more information, see Amazon EKS\n optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide. If you specify launchTemplate,\n and your launch template uses a custom AMI, then don't specify releaseVersion, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide."
+          "smithy.api#documentation": "The AMI version of the Amazon EKS optimized AMI to use with your node group.\n By default, the latest available AMI version for the node group's current Kubernetes\n version is used. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.\n If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion,\n or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide."
        }
      }
    }

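A minimal sketch of CreateNodegroup with a launch template, assuming the standard v3 command naming; the cluster name, role ARN, subnet IDs, and template name are placeholders. Because the template is assumed to reference a custom AMI, `amiType` and `releaseVersion` are deliberately omitted, matching the traits above.

```ts
import { EKSClient, CreateNodegroupCommand } from "@aws-sdk/client-eks";

export async function createWorkersNodegroup(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  const { nodegroup } = await client.send(
    new CreateNodegroupCommand({
      clusterName: "my-cluster", // placeholder
      nodegroupName: "workers",
      nodeRole: "arn:aws:iam::111122223333:role/eksNodeRole", // placeholder
      subnets: ["subnet-0aaa1111", "subnet-0bbb2222"],
      launchTemplate: { name: "my-node-template", version: "3" }, // name or id, not both
      scalingConfig: { minSize: 2, maxSize: 6, desiredSize: 2 },
    })
  );

  console.log(nodegroup?.status); // e.g. "CREATING"
}
```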
@@ -1540,7 +1540,7 @@
      "preserve": {
        "target": "com.amazonaws.eks#Boolean",
        "traits": {
-          "smithy.api#documentation": "Specifying this option preserves the add-on software on your cluster but Amazon EKS stops managing any settings for the add-on. If an IAM account is associated with the add-on, it is not removed.",
+          "smithy.api#documentation": "Specifying this option preserves the add-on software on your cluster but Amazon EKS stops managing any settings for the add-on. If an IAM\n account is associated with the add-on, it is not removed.",
          "smithy.api#httpQuery": "preserve"
        }
      }

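A minimal sketch of the `preserve` flag in use, assuming the standard v3 command naming; the cluster and add-on names are placeholders.

```ts
import { EKSClient, DeleteAddonCommand } from "@aws-sdk/client-eks";

// preserve=true keeps the add-on software on the cluster while Amazon EKS stops
// managing its settings; it is sent as the ?preserve query parameter.
export async function deleteAddonButKeepSoftware(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  await client.send(
    new DeleteAddonCommand({
      clusterName: "my-cluster", // placeholder
      addonName: "vpc-cni",      // placeholder add-on name
      preserve: true,
    })
  );
}
```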
@@ -1580,7 +1580,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Deletes the Amazon EKS cluster control plane.\n If you have active services in your cluster that are associated with a load balancer,\n you must delete those services before deleting the cluster so that the load balancers\n are deleted properly. Otherwise, you can have orphaned resources in your VPC that\n prevent you from being able to delete the VPC. For more information, see Deleting a\n Cluster in the Amazon EKS User Guide.\n If you have managed node groups or Fargate profiles attached to the cluster, you must\n delete them first. For more information, see DeleteNodegroup and DeleteFargateProfile.",
+      "smithy.api#documentation": "Deletes the Amazon EKS cluster control plane.\n If you have active services in your cluster that are associated with a load balancer,\n you must delete those services before deleting the cluster so that the load balancers\n are deleted properly. Otherwise, you can have orphaned resources in your VPC that\n prevent you from being able to delete the VPC. For more information, see Deleting a\n Cluster in the Amazon EKS User Guide.\n If you have managed node groups or Fargate profiles attached to the\n cluster, you must delete them first. For more information, see DeleteNodegroup and DeleteFargateProfile.",
      "smithy.api#http": {
        "method": "DELETE",
        "uri": "/clusters/{name}",

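A minimal sketch of the deletion order the documentation above requires (node groups and Fargate profiles before the control plane), assuming the standard v3 command naming; region and cluster name are placeholders.

```ts
import {
  EKSClient,
  ListNodegroupsCommand,
  DeleteNodegroupCommand,
  ListFargateProfilesCommand,
  DeleteFargateProfileCommand,
  DeleteClusterCommand,
} from "@aws-sdk/client-eks";

// A real script would also wait for each deletion to finish, and delete
// Fargate profiles one at a time.
export async function deleteClusterAndDependencies(clusterName: string): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  const { nodegroups = [] } = await client.send(new ListNodegroupsCommand({ clusterName }));
  for (const nodegroupName of nodegroups) {
    await client.send(new DeleteNodegroupCommand({ clusterName, nodegroupName }));
  }

  const { fargateProfileNames = [] } = await client.send(
    new ListFargateProfilesCommand({ clusterName })
  );
  for (const fargateProfileName of fargateProfileNames) {
    await client.send(new DeleteFargateProfileCommand({ clusterName, fargateProfileName }));
  }

  await client.send(new DeleteClusterCommand({ name: clusterName }));
}
```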
@@ -1635,7 +1635,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Deletes an Fargate profile.\n When you delete a Fargate profile, any pods running on Fargate that were created with the\n profile are deleted. If those pods match another Fargate profile, then they are scheduled\n on Fargate with that profile. If they no longer match any Fargate profiles, then they are not\n scheduled on Fargate and they may remain in a pending state.\n Only one Fargate profile in a cluster can be in the DELETING status at a\n time. You must wait for a Fargate profile to finish deleting before you can delete any\n other profiles in that cluster.",
+      "smithy.api#documentation": "Deletes an Fargate profile.\n When you delete a Fargate profile, any pods running on Fargate that were created with the profile are deleted. If those pods match\n another Fargate profile, then they are scheduled on Fargate with that profile. If they no longer match any Fargate profiles, then\n they are not scheduled on Fargate and they may remain in a pending\n state.\n Only one Fargate profile in a cluster can be in the\n DELETING status at a time. You must wait for a Fargate\n profile to finish deleting before you can delete any other profiles in that\n cluster.",
      "smithy.api#http": {
        "method": "DELETE",
        "uri": "/clusters/{clusterName}/fargate-profiles/{fargateProfileName}",
@@ -1649,7 +1649,7 @@
      "clusterName": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The name of the Amazon EKS cluster associated with the Fargate profile to delete.",
+          "smithy.api#documentation": "The name of the Amazon EKS cluster associated with the Fargate\n profile to delete.",
          "smithy.api#httpLabel": {},
          "smithy.api#required": {}
        }

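A minimal polling sketch for the "only one profile may be DELETING at a time" rule above, assuming the standard v3 command naming; region is a placeholder, and the error-name check is how v3 clients typically surface ResourceNotFoundException.

```ts
import { EKSClient, DescribeFargateProfileCommand } from "@aws-sdk/client-eks";

// Poll until the profile disappears before deleting the next one.
export async function waitForFargateProfileDeleted(
  clusterName: string,
  fargateProfileName: string
): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  for (;;) {
    try {
      const { fargateProfile } = await client.send(
        new DescribeFargateProfileCommand({ clusterName, fargateProfileName })
      );
      console.log(fargateProfile?.status); // "DELETING" while the deletion is in progress
    } catch (err) {
      if ((err as Error).name === "ResourceNotFoundException") return; // fully deleted
      throw err;
    }
    await new Promise((resolve) => setTimeout(resolve, 30_000)); // poll every 30 seconds
  }
}
```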
@@ -1718,7 +1718,7 @@
      "clusterName": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The name of the Amazon EKS cluster that is associated with your node group.",
+          "smithy.api#documentation": "The name of the Amazon EKS cluster that is associated with your node\n group.",
          "smithy.api#httpLabel": {},
          "smithy.api#required": {}
        }
@@ -1773,7 +1773,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Deregisters a connected cluster to remove it from the Amazon EKS control plane.",
+      "smithy.api#documentation": "Deregisters a connected cluster to remove it from the Amazon EKS control\n plane.",
      "smithy.api#http": {
        "method": "DELETE",
        "uri": "/cluster-registrations/{name}",
@@ -2235,7 +2235,7 @@
      "clusterName": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The name of the Amazon EKS cluster associated with the Fargate profile.",
+          "smithy.api#documentation": "The name of the Amazon EKS cluster associated with the Fargate\n profile.",
          "smithy.api#httpLabel": {},
          "smithy.api#required": {}
        }

@@ -2463,7 +2463,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Returns descriptive information about an update against your Amazon EKS cluster or\n associated managed node group.\n When the status of the update is Succeeded, the update is complete. If an\n update fails, the status is Failed, and an error detail explains the reason\n for the failure.",
+      "smithy.api#documentation": "Returns descriptive information about an update against your Amazon EKS\n cluster or associated managed node group.\n When the status of the update is Succeeded, the update is complete. If an\n update fails, the status is Failed, and an error detail explains the reason\n for the failure.",
      "smithy.api#http": {
        "method": "GET",
        "uri": "/clusters/{name}/updates/{updateId}",

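A minimal sketch of tracking an update with DescribeUpdate, assuming the standard v3 command naming; region is a placeholder, and the loop simply waits for the status to leave the in-progress state.

```ts
import { EKSClient, DescribeUpdateCommand } from "@aws-sdk/client-eks";

// Poll an update until it completes, then surface error details on failure.
export async function waitForClusterUpdate(name: string, updateId: string) {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  for (;;) {
    const { update } = await client.send(new DescribeUpdateCommand({ name, updateId }));
    if (update?.status && update.status !== "InProgress") {
      if (update.status === "Failed") console.error(update.errors);
      return update;
    }
    await new Promise((resolve) => setTimeout(resolve, 15_000));
  }
}
```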
@@ -2546,7 +2546,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Disassociates an identity provider configuration from a cluster. If you disassociate\n an identity provider from your cluster, users included in the provider can no longer\n access the cluster. However, you can still access the cluster with Amazon Web Services IAM\n users.",
+      "smithy.api#documentation": "Disassociates an identity provider configuration from a cluster. If you disassociate\n an identity provider from your cluster, users included in the provider can no longer\n access the cluster. However, you can still access the cluster with Amazon Web Services\n IAM users.",
      "smithy.api#http": {
        "method": "POST",
        "uri": "/clusters/{clusterName}/identity-provider-configs/disassociate",
@@ -2601,7 +2601,7 @@
      "provider": {
        "target": "com.amazonaws.eks#Provider",
        "traits": {
-          "smithy.api#documentation": "Key Management Service (KMS) key. Either the ARN or the alias can be used."
+          "smithy.api#documentation": "Key Management Service (KMS) key. Either the ARN or the alias can be\n used."
        }
      }
    },

@@ -2702,7 +2702,7 @@
      "errorCode": {
        "target": "com.amazonaws.eks#ErrorCode",
        "traits": {
-          "smithy.api#documentation": "A brief description of the error. • SubnetNotFound: We couldn't find one of the\n subnets associated with the cluster. • SecurityGroupNotFound: We couldn't find one\n of the security groups associated with the cluster. • EniLimitReached: You have reached the elastic\n network interface limit for your account. • IpNotAvailable: A subnet associated with the\n cluster doesn't have any free IP addresses. • AccessDenied: You don't have permissions to\n perform the specified operation. • OperationNotPermitted: The service role\n associated with the cluster doesn't have the required access permissions for\n Amazon EKS. • VpcIdNotFound: We couldn't find the VPC\n associated with the cluster."
+          "smithy.api#documentation": "A brief description of the error. • SubnetNotFound: We couldn't find one of the\n subnets associated with the cluster. • SecurityGroupNotFound: We couldn't find one\n of the security groups associated with the cluster. • EniLimitReached: You have reached the elastic\n network interface limit for your account. • IpNotAvailable: A subnet associated with the\n cluster doesn't have any free IP addresses. • AccessDenied: You don't have permissions to\n perform the specified operation. • OperationNotPermitted: The service role\n associated with the cluster doesn't have the required access permissions for\n Amazon EKS. • VpcIdNotFound: We couldn't find the VPC\n associated with the cluster."
        }
      },
      "errorMessage": {

@@ -2746,13 +2746,13 @@
      "clusterName": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The name of the Amazon EKS cluster that the Fargate profile belongs to."
+          "smithy.api#documentation": "The name of the Amazon EKS cluster that the Fargate profile\n belongs to."
        }
      },
      "createdAt": {
        "target": "com.amazonaws.eks#Timestamp",
        "traits": {
-          "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the Fargate profile was created."
+          "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the Fargate profile was\n created."
        }
      },
      "podExecutionRoleArn": {
@@ -2986,13 +2986,28 @@
        "smithy.api#httpError": 400
      }
    },
+    "com.amazonaws.eks#IpFamily": {
+      "type": "string",
+      "traits": {
+        "smithy.api#enum": [
+          {
+            "value": "ipv4",
+            "name": "IPV4"
+          },
+          {
+            "value": "ipv6",
+            "name": "IPV6"
+          }
+        ]
+      }
+    },
    "com.amazonaws.eks#Issue": {
      "type": "structure",
      "members": {
        "code": {
          "target": "com.amazonaws.eks#NodegroupIssueCode",
          "traits": {

-            "smithy.api#documentation": "A brief description of the error. • AccessDenied: Amazon EKS or one or more of your\n managed nodes is failing to authenticate or authorize with your Kubernetes\n cluster API server. • AsgInstanceLaunchFailures: Your Auto Scaling group is\n experiencing failures while attempting to launch instances. • AutoScalingGroupNotFound: We couldn't find\n the Auto Scaling group associated with the managed node group. You may be able to\n recreate an Auto Scaling group with the same settings to recover. • ClusterUnreachable: Amazon EKS or one or more of\n your managed nodes is unable to to communicate with your Kubernetes cluster API\n server. This can happen if there are network disruptions or if API servers are\n timing out processing requests. • Ec2LaunchTemplateNotFound: We couldn't find\n the Amazon EC2 launch template for your managed node group. You may be able to\n recreate a launch template with the same settings to recover. • Ec2LaunchTemplateVersionMismatch: The Amazon EC2\n launch template version for your managed node group does not match the version\n that Amazon EKS created. You may be able to revert to the version that Amazon EKS created\n to recover. • Ec2SecurityGroupDeletionFailure: We could not\n delete the remote access security group for your managed node group. Remove any\n dependencies from the security group. • Ec2SecurityGroupNotFound: We couldn't find\n the cluster security group for the cluster. You must recreate your\n cluster. • Ec2SubnetInvalidConfiguration: One or more\n Amazon EC2 subnets specified for a node group do not automatically assign public IP\n addresses to instances launched into it. If you want your instances to be\n assigned a public IP address, then you need to enable the auto-assign\n public IP address setting for the subnet. See Modifying\n the public IPv4 addressing attribute for your subnet in the Amazon\n VPC User Guide. • IamInstanceProfileNotFound: We couldn't find\n the IAM instance profile for your managed node group. You may be able to\n recreate an instance profile with the same settings to recover. • IamNodeRoleNotFound: We couldn't find the\n IAM role for your managed node group. You may be able to recreate an IAM role\n with the same settings to recover. • InstanceLimitExceeded: Your Amazon Web Services account is\n unable to launch any more instances of the specified instance type. You may be\n able to request an Amazon EC2 instance limit increase to recover. • InsufficientFreeAddresses: One or more of the\n subnets associated with your managed node group does not have enough available\n IP addresses for new nodes. • InternalFailure: These errors are usually\n caused by an Amazon EKS server-side issue. • NodeCreationFailure: Your launched instances\n are unable to register with your Amazon EKS cluster. Common causes of this failure\n are insufficient node IAM role\n permissions or lack of outbound internet access for the nodes."
+            "smithy.api#documentation": "A brief description of the error. • AccessDenied: Amazon EKS or one or\n more of your managed nodes is failing to authenticate or authorize with your\n Kubernetes cluster API server. • AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch\n instances. • AutoScalingGroupNotFound: We couldn't find\n the Auto Scaling group associated with the managed node group. You may be\n able to recreate an Auto Scaling group with the same settings to\n recover. • ClusterUnreachable: Amazon EKS or one\n or more of your managed nodes is unable to to communicate with your Kubernetes\n cluster API server. This can happen if there are network disruptions or if API\n servers are timing out processing requests. • Ec2LaunchTemplateNotFound: We couldn't find\n the Amazon EC2 launch template for your managed node group. You may be\n able to recreate a launch template with the same settings to recover. • Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not\n match the version that Amazon EKS created. You may be able to revert to\n the version that Amazon EKS created to recover. • Ec2SecurityGroupDeletionFailure: We could not\n delete the remote access security group for your managed node group. Remove any\n dependencies from the security group. • Ec2SecurityGroupNotFound: We couldn't find\n the cluster security group for the cluster. You must recreate your\n cluster. • Ec2SubnetInvalidConfiguration: One or more\n Amazon EC2 subnets specified for a node group do not automatically\n assign public IP addresses to instances launched into it. If you want your\n instances to be assigned a public IP address, then you need to enable the\n auto-assign public IP address setting for the subnet. See\n Modifying\n the public IPv4 addressing attribute for your subnet in the Amazon\n VPC User Guide. • IamInstanceProfileNotFound: We couldn't find\n the IAM instance profile for your managed node group. You may be\n able to recreate an instance profile with the same settings to recover. • IamNodeRoleNotFound: We couldn't find the\n IAM role for your managed node group. You may be able to\n recreate an IAM role with the same settings to recover. • InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance\n type. You may be able to request an Amazon EC2 instance limit increase\n to recover. • InsufficientFreeAddresses: One or more of the\n subnets associated with your managed node group does not have enough available\n IP addresses for new nodes. • InternalFailure: These errors are usually\n caused by an Amazon EKS server-side issue. • NodeCreationFailure: Your launched instances\n are unable to register with your Amazon EKS cluster. Common causes of this failure\n are insufficient node IAM role\n permissions or lack of outbound internet access for the nodes."
          }
        },
        "message": {

@@ -3024,7 +3039,13 @@
      "serviceIpv4Cidr": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a\n block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR\n blocks. We recommend that you specify a block that does not overlap with resources in\n other networks that are peered or connected to your VPC. The block must meet the\n following requirements: • Within one of the following private IP address blocks: 10.0.0.0/8,\n 172.16.0.0/12, or 192.168.0.0/16. • Doesn't overlap with any CIDR block assigned to the VPC that you selected for\n VPC. • Between /24 and /12. You can only specify a custom CIDR block when you create a cluster and can't\n change this value once the cluster is created."
+          "smithy.api#documentation": "Don't specify a value if you select ipv6 for ipFamily. The CIDR block to assign Kubernetes service IP addresses from.\n If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16\n or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that does not\n overlap with resources in other networks that are peered or connected to your VPC. The\n block must meet the following requirements: • Within one of the following private IP address blocks: 10.0.0.0/8,\n 172.16.0.0/12, or 192.168.0.0/16. • Doesn't overlap with any CIDR block assigned to the VPC that you selected for\n VPC. • Between /24 and /12. You can only specify a custom CIDR block when you create a cluster and can't\n change this value once the cluster is created."
        }
      },
+      "ipFamily": {
+        "target": "com.amazonaws.eks#IpFamily",
+        "traits": {
+          "smithy.api#documentation": "Specify which IP version is used to assign Kubernetes Pod and Service IP addresses. If\n you don't specify a value, ipv4 is used by default. You can only specify an\n IP family when you create a cluster and can't change this value once the cluster is\n created. If you specify ipv6, the VPC and subnets that you specify for\n cluster creation must have both IPv4 and IPv6 CIDR blocks assigned to them.\n You can only specify ipv6 for 1.21 and later clusters that use version\n 1.10.0 or later of the Amazon VPC CNI add-on. If you specify ipv6, then ensure\n that your VPC meets the requirements and that you're familiar with the considerations\n listed in Assigning\n IPv6 addresses to Pods and Services in the Amazon EKS User Guide. If\n you specify ipv6, Kubernetes assigns Service and Pod addresses from the\n unique local address range (fc00::/7). You can't specify a custom IPv6 CIDR\n block."
+        }
+      }
    }

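A minimal sketch of creating a cluster with the new `ipFamily` setting, assuming the standard v3 command naming; the cluster name, role ARN, and subnet IDs are placeholders.

```ts
import { EKSClient, CreateClusterCommand } from "@aws-sdk/client-eks";

// ipFamily can only be chosen at creation time. With "ipv6", serviceIpv4Cidr is
// omitted and the subnets must carry both IPv4 and IPv6 CIDR blocks.
export async function createIpv6Cluster(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  const { cluster } = await client.send(
    new CreateClusterCommand({
      name: "ipv6-cluster",
      version: "1.21",
      roleArn: "arn:aws:iam::111122223333:role/eksClusterRole", // placeholder
      resourcesVpcConfig: { subnetIds: ["subnet-0aaa1111", "subnet-0bbb2222"] }, // dual-stack subnets
      kubernetesNetworkConfig: { ipFamily: "ipv6" },
    })
  );

  console.log(cluster?.status); // "CREATING"
}
```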
@@ -3038,12 +3059,24 @@
      "serviceIpv4Cidr": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The CIDR block that Kubernetes service IP addresses are assigned from. If you didn't\n specify a CIDR block when you created the cluster, then Kubernetes assigns addresses\n from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this was specified, then\n it was specified when the cluster was created and it cannot be changed."
+          "smithy.api#documentation": "The CIDR block that Kubernetes Pod and Service IP addresses are assigned from.\n Kubernetes assigns addresses from an IPv4 CIDR block assigned to a subnet that the node\n is in. If you didn't specify a CIDR block when you created the cluster, then Kubernetes\n assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this\n was specified, then it was specified when the cluster was created and it can't be\n changed."
        }
      },
+      "serviceIpv6Cidr": {
+        "target": "com.amazonaws.eks#String",
+        "traits": {
+          "smithy.api#documentation": "The CIDR block that Kubernetes Pod and Service IP addresses are assigned from if you\n created a 1.21 or later cluster with version 1.10.0 or later of the Amazon VPC CNI add-on and\n specified ipv6 for ipFamily when you\n created the cluster. Kubernetes assigns addresses from the unique local address range\n (fc00::/7)."
+        }
+      },
+      "ipFamily": {
+        "target": "com.amazonaws.eks#IpFamily",
+        "traits": {
+          "smithy.api#documentation": "The IP family used to assign Kubernetes Pod and Service IP addresses. The IP family is\n always ipv4, unless you have a 1.21 or later cluster running\n version 1.10.0 or later of the Amazon VPC CNI add-on and specified ipv6 when you\n created the cluster."
+        }
+      }
    },
    "traits": {
-      "smithy.api#documentation": "The Kubernetes network configuration for the cluster."
+      "smithy.api#documentation": "The Kubernetes network configuration for the cluster. The response contains a value\n for serviceIpv6Cidr or serviceIpv4Cidr, but not both."
    }
  },
  "com.amazonaws.eks#LaunchTemplateSpecification": {

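A minimal sketch of reading the response shape above back from DescribeCluster, assuming the standard v3 command naming; region and cluster name are placeholders.

```ts
import { EKSClient, DescribeClusterCommand } from "@aws-sdk/client-eks";

// Only one of the two service CIDR fields is expected to be populated.
export async function printServiceCidr(name: string): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  const { cluster } = await client.send(new DescribeClusterCommand({ name }));
  const netConfig = cluster?.kubernetesNetworkConfig;

  console.log(netConfig?.ipFamily); // "ipv4" or "ipv6"
  console.log(netConfig?.serviceIpv4Cidr ?? netConfig?.serviceIpv6Cidr);
}
```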
@@ -3069,7 +3102,7 @@
      }
    },
    "traits": {
-      "smithy.api#documentation": "An object representing a node group launch template specification. The launch template\n cannot include \n SubnetId\n , \n IamInstanceProfile\n , \n RequestSpotInstances\n , \n HibernationOptions\n , or \n TerminateInstances\n , or the node group deployment or\n update will fail. For more information about launch templates, see \n CreateLaunchTemplate\n in the Amazon EC2 API Reference.\n For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.\n Specify either name or id, but not both."
+      "smithy.api#documentation": "An object representing a node group launch template specification. The launch template\n cannot include \n SubnetId\n , \n IamInstanceProfile\n , \n RequestSpotInstances\n , \n HibernationOptions\n , or \n TerminateInstances\n , or the node group deployment or\n update will fail. For more information about launch templates, see \n CreateLaunchTemplate\n in the Amazon EC2 API\n Reference. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.\n Specify either name or id, but not both."
    }
  },
  "com.amazonaws.eks#ListAddons": {

@@ -3189,7 +3222,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Lists the Amazon EKS clusters in your Amazon Web Services account in the specified Region.",
+      "smithy.api#documentation": "Lists the Amazon EKS clusters in your Amazon Web Services account in the\n specified Region.",
      "smithy.api#http": {
        "method": "GET",
        "uri": "/clusters",
@@ -3279,7 +3312,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Lists the Fargate profiles associated with the specified cluster in your Amazon Web Services\n account in the specified Region.",
+      "smithy.api#documentation": "Lists the Fargate profiles associated with the specified cluster in\n your Amazon Web Services account in the specified Region.",
      "smithy.api#http": {
        "method": "GET",
        "uri": "/clusters/{clusterName}/fargate-profiles",
@@ -3326,7 +3359,7 @@
      "fargateProfileNames": {
        "target": "com.amazonaws.eks#StringList",
        "traits": {
-          "smithy.api#documentation": "A list of all of the Fargate profiles associated with the specified cluster."
+          "smithy.api#documentation": "A list of all of the Fargate profiles associated with the specified\n cluster."
        }
      },
      "nextToken": {
@@ -3457,7 +3490,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Lists the Amazon EKS managed node groups associated with the specified cluster in your\n Amazon Web Services account in the specified Region. Self-managed node groups are not listed.",
+      "smithy.api#documentation": "Lists the Amazon EKS managed node groups associated with the specified cluster\n in your Amazon Web Services account in the specified Region. Self-managed node groups are\n not listed.",
      "smithy.api#http": {
        "method": "GET",
        "uri": "/clusters/{clusterName}/node-groups",
@@ -3477,7 +3510,7 @@
      "clusterName": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The name of the Amazon EKS cluster that you would like to list node groups in.",
+          "smithy.api#documentation": "The name of the Amazon EKS cluster that you would like to list node groups\n in.",
          "smithy.api#httpLabel": {},
          "smithy.api#required": {}
        }
@@ -3597,7 +3630,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Lists the updates associated with an Amazon EKS cluster or managed node group in your Amazon Web Services\n account, in the specified Region.",
+      "smithy.api#documentation": "Lists the updates associated with an Amazon EKS cluster or managed node group\n in your Amazon Web Services account, in the specified Region.",
      "smithy.api#http": {
        "method": "GET",
        "uri": "/clusters/{name}/updates",

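A minimal pagination sketch for the list operations above, assuming the standard v3 command naming; region is a placeholder.

```ts
import { EKSClient, ListClustersCommand } from "@aws-sdk/client-eks";

// Follow nextToken until it comes back undefined.
export async function listAllClusters(): Promise<string[]> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region
  const names: string[] = [];
  let nextToken: string | undefined;

  do {
    const page = await client.send(new ListClustersCommand({ maxResults: 50, nextToken }));
    names.push(...(page.clusters ?? []));
    nextToken = page.nextToken;
  } while (nextToken);

  return names;
}
```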
@@ -3691,7 +3724,7 @@
      "enabled": {
        "target": "com.amazonaws.eks#BoxedBoolean",
        "traits": {
-          "smithy.api#documentation": "If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs. If a\n log type isn't enabled, that log type doesn't export its control plane logs. Each\n individual log type can be enabled or disabled independently."
+          "smithy.api#documentation": "If a log type is enabled, that log type exports its control plane logs to CloudWatch Logs. If a log type isn't enabled, that log type doesn't export its control\n plane logs. Each individual log type can be enabled or disabled independently."
        }
      }
    },

@@ -3782,7 +3815,7 @@
      "releaseVersion": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "If the node group was deployed using a launch template with a custom AMI, then this is\n the AMI ID that was specified in the launch template. For node groups that weren't\n deployed using a launch template, this is the version of the Amazon EKS optimized AMI that\n the node group was deployed with."
+          "smithy.api#documentation": "If the node group was deployed using a launch template with a custom AMI, then this is\n the AMI ID that was specified in the launch template. For node groups that weren't\n deployed using a launch template, this is the version of the Amazon EKS\n optimized AMI that the node group was deployed with."
        }
      },
      "createdAt": {
@@ -3842,7 +3875,7 @@
      "nodeRole": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The IAM role associated with your node group. The Amazon EKS node kubelet\n daemon makes calls to Amazon Web Services APIs on your behalf. Nodes receive permissions for these API\n calls through an IAM instance profile and associated policies."
+          "smithy.api#documentation": "The IAM role associated with your node group. The Amazon EKS\n node kubelet daemon makes calls to Amazon Web Services APIs on your behalf.\n Nodes receive permissions for these API calls through an IAM instance\n profile and associated policies."
        }
      },
      "labels": {
        "target": "com.amazonaws.eks#labelsMap",
        "traits": {
-          "smithy.api#documentation": "The Kubernetes labels applied to the nodes in the node group.\n \n Only labels that are applied with the Amazon EKS API are shown here. There may be other\n Kubernetes labels applied to the nodes in this group.\n"
+          "smithy.api#documentation": "The Kubernetes labels applied to the nodes in the node group.\n \n Only labels that are applied with the Amazon EKS API are shown here. There\n may be other Kubernetes labels applied to the nodes in this group.\n"
        }
      },
      "taints": {
        "target": "com.amazonaws.eks#taintsList",
        "traits": {
-          "smithy.api#documentation": "The Kubernetes taints to be applied to the nodes in the node group when they are\n created. Effect is one of No_Schedule, Prefer_No_Schedule, or No_Execute. Kubernetes taints\n can be used together with tolerations to control how workloads are scheduled to your\n nodes."
+          "smithy.api#documentation": "The Kubernetes taints to be applied to the nodes in the node group when they are\n created. Effect is one of No_Schedule, Prefer_No_Schedule, or\n No_Execute. Kubernetes taints can be used together with tolerations to\n control how workloads are scheduled to your nodes."
        }
      },
      "resources": {

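A minimal sketch of updating the API-managed labels described above, assuming the standard v3 command naming; the cluster and node group names are placeholders, and the `addOrUpdateLabels`/`removeLabels` field names are assumptions about the update payload shape.

```ts
import { EKSClient, UpdateNodegroupConfigCommand } from "@aws-sdk/client-eks";

// Labels applied through the EKS API are the ones reported back on the Nodegroup shape.
export async function relabelNodegroup(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  const { update } = await client.send(
    new UpdateNodegroupConfigCommand({
      clusterName: "my-cluster", // placeholder
      nodegroupName: "workers",
      labels: {
        addOrUpdateLabels: { team: "payments" }, // assumed payload field names
        removeLabels: ["deprecated-label"],
      },
    })
  );

  console.log(update?.id); // track with DescribeUpdate
}
```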
@@ -4023,13 +4056,13 @@
      "maxSize": {
        "target": "com.amazonaws.eks#Capacity",
        "traits": {
-          "smithy.api#documentation": "The maximum number of nodes that the managed node group can scale out to. For\n information about the maximum number that you can specify, see Amazon EKS service\n quotas in the Amazon EKS User Guide."
+          "smithy.api#documentation": "The maximum number of nodes that the managed node group can scale out to. For\n information about the maximum number that you can specify, see Amazon EKS service quotas in the Amazon EKS User Guide."
        }
      },
      "desiredSize": {
        "target": "com.amazonaws.eks#ZeroCapacity",
        "traits": {
-          "smithy.api#documentation": "The current number of nodes that the managed node group should maintain.\n \n If you use Cluster Autoscaler, you shouldn't change the desiredSize value\n directly, as this can cause the Cluster Autoscaler to suddenly scale up or scale\n down.\n Whenever this parameter changes, the number of worker nodes in the node group is\n updated to the specified size. If this parameter is given a value that is smaller than\n the current number of running worker nodes, the necessary number of worker nodes are\n terminated to match the given value.\n \n When using CloudFormation, no action occurs if you remove this parameter from your CFN\n template.\n This parameter can be different from minSize in some cases, such as when starting with\n extra hosts for testing. This parameter can also be different when you want to start\n with an estimated number of needed hosts, but let Cluster Autoscaler reduce the number\n if there are too many. When Cluster Autoscaler is used, the desiredSize parameter is\n altered by Cluster Autoscaler (but can be out-of-date for short periods of time).\n Cluster Autoscaler doesn't scale a managed node group lower than minSize or higher than\n maxSize."
+          "smithy.api#documentation": "The current number of nodes that the managed node group should maintain.\n \n If you use Cluster Autoscaler, you shouldn't change the desiredSize value\n directly, as this can cause the Cluster Autoscaler to suddenly scale up or scale\n down.\n Whenever this parameter changes, the number of worker nodes in the node group is\n updated to the specified size. If this parameter is given a value that is smaller than\n the current number of running worker nodes, the necessary number of worker nodes are\n terminated to match the given value.\n \n When using CloudFormation, no action occurs if you remove this parameter from your CFN\n template.\n This parameter can be different from minSize in some cases, such as when starting with\n extra hosts for testing. This parameter can also be different when you want to start\n with an estimated number of needed hosts, but let Cluster Autoscaler reduce the number\n if there are too many. When Cluster Autoscaler is used, the desiredSize parameter is\n altered by Cluster Autoscaler (but can be out-of-date for short periods of time).\n Cluster Autoscaler doesn't scale a managed node group lower than minSize or higher than\n maxSize."
        }
      }
    }

@@ -4078,13 +4111,13 @@
      "maxUnavailable": {
        "target": "com.amazonaws.eks#NonZeroInteger",
        "traits": {
-          "smithy.api#documentation": "The maximum number of nodes unavailable at once during a version update. Nodes will be updated in parallel. \n This value or maxUnavailablePercentage is required to have a value.The maximum number\n is 100."
+          "smithy.api#documentation": "The maximum number of nodes unavailable at once during a version update. Nodes will be\n updated in parallel. This value or maxUnavailablePercentage is required to\n have a value.The maximum number is 100."
        }
      },
      "maxUnavailablePercentage": {
        "target": "com.amazonaws.eks#PercentCapacity",
        "traits": {
-          "smithy.api#documentation": "The maximum percentage of nodes unavailable during a version update. This percentage of nodes will be\n updated in parallel, up to 100 nodes at once. This value or maxUnavailable is required to have a value."
+          "smithy.api#documentation": "The maximum percentage of nodes unavailable during a version update. This percentage\n of nodes will be updated in parallel, up to 100 nodes at once. This value or\n maxUnavailable is required to have a value."
        }
      }
    }

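A minimal sketch combining the scaling and update-config members documented above, assuming the standard v3 command naming; the cluster and node group names are placeholders.

```ts
import { EKSClient, UpdateNodegroupConfigCommand } from "@aws-sdk/client-eks";

// Resize a node group and bound how many nodes a version update may take offline
// at once. Skip changing desiredSize if Cluster Autoscaler manages this node group;
// set maxUnavailable or maxUnavailablePercentage, not both.
export async function tuneNodegroup(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  const { update } = await client.send(
    new UpdateNodegroupConfigCommand({
      clusterName: "my-cluster", // placeholder
      nodegroupName: "workers",
      scalingConfig: { minSize: 2, maxSize: 10, desiredSize: 4 },
      updateConfig: { maxUnavailablePercentage: 25 },
    })
  );

  console.log(update?.status);
}
```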
@@ -4259,7 +4292,7 @@
      "requiredClaims": {
        "target": "com.amazonaws.eks#requiredClaimsMap",
        "traits": {
-          "smithy.api#documentation": "The key value pairs that describe required claims in the identity token. If set, each\n claim is verified to be present in the token with a matching value. For the maximum\n number of claims that you can require, see Amazon EKS service quotas in the\n Amazon EKS User Guide."
+          "smithy.api#documentation": "The key value pairs that describe required claims in the identity token. If set, each\n claim is verified to be present in the token with a matching value. For the maximum\n number of claims that you can require, see Amazon EKS service\n quotas in the Amazon EKS User Guide."
        }
      }
    },
@@ -4283,12 +4316,12 @@
      "keyArn": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "Amazon Resource Name (ARN) or alias of the KMS key. The KMS key must be symmetric, created in the same\n region as the cluster, and if the KMS key was created in a different account, the user\n must have access to the KMS key. For more information, see Allowing\n Users in Other Accounts to Use a KMS key in the Key Management Service\n Developer Guide."
+          "smithy.api#documentation": "Amazon Resource Name (ARN) or alias of the KMS key. The KMS key must be symmetric, created in the same\n region as the cluster, and if the KMS key was created in a different account, the user\n must have access to the KMS key. For more information, see Allowing\n Users in Other Accounts to Use a KMS key in the Key Management Service Developer Guide."
        }
      }
    },
    "traits": {
-      "smithy.api#documentation": "Identifies the Key Management Service (KMS) key used to encrypt the secrets."
+      "smithy.api#documentation": "Identifies the Key Management Service (KMS) key used to encrypt the\n secrets."
    }
  },
  "com.amazonaws.eks#RegisterCluster": {

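A minimal sketch of supplying the KMS provider documented above when creating a cluster, assuming the standard v3 command naming; the cluster name, role ARN, subnet IDs, and key ARN are placeholders.

```ts
import { EKSClient, CreateClusterCommand } from "@aws-sdk/client-eks";

// Envelope-encrypt Kubernetes secrets with a symmetric KMS key from the same Region.
export async function createEncryptedCluster(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  await client.send(
    new CreateClusterCommand({
      name: "secure-cluster",
      roleArn: "arn:aws:iam::111122223333:role/eksClusterRole", // placeholder
      resourcesVpcConfig: { subnetIds: ["subnet-0aaa1111", "subnet-0bbb2222"] },
      encryptionConfig: [
        {
          resources: ["secrets"],
          provider: { keyArn: "arn:aws:kms:us-west-2:111122223333:key/example-key-id" }, // placeholder
        },
      ],
    })
  );
}
```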
@@ -4326,7 +4359,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Connects a Kubernetes cluster to the Amazon EKS control plane.\n Any Kubernetes cluster can be connected to the Amazon EKS control plane to view current information about the cluster and its nodes.\n \n Cluster connection requires two steps. First, send a \n RegisterClusterRequest\n to add it to the Amazon EKS control plane.\n Second, a Manifest containing the activationID and activationCode must be applied to the Kubernetes cluster through it's native provider to provide visibility.\n \n After the Manifest is updated and applied, then the connected cluster is visible to the Amazon EKS control plane. If the Manifest is not applied within three days, \n then the connected cluster will no longer be visible and must be deregistered. See DeregisterCluster.",
+      "smithy.api#documentation": "Connects a Kubernetes cluster to the Amazon EKS control plane.\n Any Kubernetes cluster can be connected to the Amazon EKS control plane to\n view current information about the cluster and its nodes.\n Cluster connection requires two steps. First, send a \n RegisterClusterRequest\n to add it to the Amazon EKS\n control plane.\n Second, a Manifest containing the activationID and\n activationCode must be applied to the Kubernetes cluster through it's\n native provider to provide visibility.\n \n After the Manifest is updated and applied, then the connected cluster is visible to\n the Amazon EKS control plane. If the Manifest is not applied within three days,\n then the connected cluster will no longer be visible and must be deregistered. See DeregisterCluster.",
      "smithy.api#http": {
        "method": "POST",
        "uri": "/cluster-registrations",
@@ -4354,7 +4387,7 @@
      "clientRequestToken": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.",
+          "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request.",
          "smithy.api#idempotencyToken": {}
        }
      },

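A minimal sketch of step one of the two-step connection flow described above, assuming the standard v3 command naming; the `connectorConfig` member and its `provider`/`roleArn` fields are assumptions about the request shape, and the name, role ARN, and provider value are placeholders.

```ts
import { EKSClient, RegisterClusterCommand } from "@aws-sdk/client-eks";

// Step two (applying the returned activation manifest with the cluster's native
// tooling, within three days) happens outside the SDK.
export async function registerExternalCluster(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  const { cluster } = await client.send(
    new RegisterClusterCommand({
      name: "on-prem-cluster",
      connectorConfig: {
        provider: "OTHER", // assumed enum value
        roleArn: "arn:aws:iam::111122223333:role/eks-connector-agent", // placeholder
      },
    })
  );

  console.log(cluster?.connectorConfig); // contains the activation material to apply
}
```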
@@ -4380,13 +4413,13 @@
      "ec2SshKey": {
        "target": "com.amazonaws.eks#String",
        "traits": {
-          "smithy.api#documentation": "The Amazon EC2 SSH key that provides access for SSH communication with the nodes in the\n managed node group. For more information, see Amazon EC2 key\n pairs and Linux instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances."
+          "smithy.api#documentation": "The Amazon EC2 SSH key that provides access for SSH communication with the\n nodes in the managed node group. For more information, see Amazon EC2 key pairs and Linux instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances."
        }
      },
      "sourceSecurityGroups": {
        "target": "com.amazonaws.eks#StringList",
        "traits": {
-          "smithy.api#documentation": "The security groups that are allowed SSH access (port 22) to the nodes. If you specify\n an Amazon EC2 SSH key but do not specify a source security group when you create a managed\n node group, then port 22 on the nodes is opened to the internet (0.0.0.0/0). For more\n information, see Security Groups for Your VPC in the\n Amazon Virtual Private Cloud User Guide."
+          "smithy.api#documentation": "The security groups that are allowed SSH access (port 22) to the nodes. If you specify\n an Amazon EC2 SSH key but do not specify a source security group when you create\n a managed node group, then port 22 on the nodes is opened to the internet (0.0.0.0/0).\n For more information, see Security Groups for Your VPC in the\n Amazon Virtual Private Cloud User Guide."
        }
      }
    }

@@ -4621,7 +4654,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Associates the specified tags to a resource with the specified\n resourceArn. If existing tags on a resource are not specified in the\n request parameters, they are not changed. When a resource is deleted, the tags\n associated with that resource are deleted as well. Tags that you create for Amazon EKS\n resources do not propagate to any other resources associated with the cluster. For\n example, if you tag a cluster with this operation, that tag does not automatically\n propagate to the subnets and nodes associated with the cluster.",
+      "smithy.api#documentation": "Associates the specified tags to a resource with the specified\n resourceArn. If existing tags on a resource are not specified in the\n request parameters, they are not changed. When a resource is deleted, the tags\n associated with that resource are deleted as well. Tags that you create for Amazon EKS resources do not propagate to any other resources associated with the\n cluster. For example, if you tag a cluster with this operation, that tag does not\n automatically propagate to the subnets and nodes associated with the cluster.",
      "smithy.api#http": {
        "method": "POST",
        "uri": "/tags/{resourceArn}",
@@ -4736,7 +4769,7 @@
      }
    },
    "traits": {
-      "smithy.api#documentation": "At least one of your specified cluster subnets is in an Availability Zone that does\n not support Amazon EKS. The exception output specifies the supported Availability Zones for\n your account, from which you can choose subnets for your cluster.",
+      "smithy.api#documentation": "At least one of your specified cluster subnets is in an Availability Zone that does\n not support Amazon EKS. The exception output specifies the supported\n Availability Zones for your account, from which you can choose subnets for your\n cluster.",
      "smithy.api#error": "client",
      "smithy.api#httpError": 400
    }

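A minimal sketch of the tagging behavior described above, assuming the standard v3 command naming; the resource ARN and tag values are placeholders.

```ts
import { EKSClient, TagResourceCommand } from "@aws-sdk/client-eks";

// Tags apply only to the resource named by resourceArn and do not propagate to
// the subnets or nodes that belong to a tagged cluster.
export async function tagCluster(): Promise<void> {
  const client = new EKSClient({ region: "us-west-2" }); // placeholder region

  await client.send(
    new TagResourceCommand({
      resourceArn: "arn:aws:eks:us-west-2:111122223333:cluster/my-cluster", // placeholder
      tags: { team: "platform", env: "prod" },
    })
  );
}
```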
@@ -4955,7 +4988,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Updates an Amazon EKS cluster configuration. Your cluster continues to function during the\n update. The response output includes an update ID that you can use to track the status\n of your cluster update with the DescribeUpdate API operation.\n You can use this API operation to enable or disable exporting the Kubernetes control\n plane logs for your cluster to CloudWatch Logs. By default, cluster control plane logs aren't\n exported to CloudWatch Logs. For more information, see Amazon EKS\n Cluster Control Plane Logs in the\n \n Amazon EKS User Guide\n .\n \n CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported\n control plane logs. For more information, see CloudWatch Pricing.\n You can also use this API operation to enable or disable public and private access to\n your cluster's Kubernetes API server endpoint. By default, public access is enabled, and\n private access is disabled. For more information, see Amazon EKS cluster\n endpoint access control in the \n Amazon EKS User Guide\n .\n \n You can't update the subnets or security group IDs for an existing cluster.\n Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.",
+      "smithy.api#documentation": "Updates an Amazon EKS cluster configuration. Your cluster continues to\n function during the update. The response output includes an update ID that you can use\n to track the status of your cluster update with the DescribeUpdate API\n operation.\n You can use this API operation to enable or disable exporting the Kubernetes control\n plane logs for your cluster to CloudWatch Logs. By default, cluster control plane\n logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the\n \n Amazon EKS User Guide\n .\n \n CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.\n You can also use this API operation to enable or disable public and private access to\n your cluster's Kubernetes API server endpoint. By default, public access is enabled, and\n private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .\n \n You can't update the subnets or security group IDs for an existing cluster.\n Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.",
      "smithy.api#http": {
        "method": "POST",
        "uri": "/clusters/{name}/update-config",
@@ -4980,7 +5013,7 @@
      "logging": {
        "target": "com.amazonaws.eks#Logging",
        "traits": {
-          "smithy.api#documentation": "Enable or disable exporting the Kubernetes control plane logs for your cluster to\n CloudWatch Logs. By default, cluster control plane logs aren't exported to CloudWatch Logs. For more\n information, see Amazon EKS cluster control plane logs in the\n \n Amazon EKS User Guide\n .\n \n CloudWatch Logs ingestion, archive storage, and data scanning rates apply to exported\n control plane logs. For more information, see CloudWatch Pricing.\n"
+          "smithy.api#documentation": "Enable or disable exporting the Kubernetes control plane logs for your cluster to\n CloudWatch Logs. By default, cluster control plane logs aren't exported to\n CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the\n \n Amazon EKS User Guide\n .\n \n CloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.\n"
        }
      },
      "clientRequestToken": {

              Updates an Amazon EKS cluster to the specified Kubernetes version. Your cluster continues\n to function during the update. The response output includes an update ID that you can\n use to track the status of your cluster update with the DescribeUpdate\n API operation.

              \n

              Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.

              \n

              If your cluster has managed node groups attached to it, all of your node groups’\n Kubernetes versions must match the cluster’s Kubernetes version in order to update the\n cluster to a new Kubernetes version.

              ", + "smithy.api#documentation": "

              Updates an Amazon EKS cluster to the specified Kubernetes version. Your\n cluster continues to function during the update. The response output includes an update\n ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.

              \n

              Cluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING (this status transition is\n eventually consistent). When the update is complete (either Failed or\n Successful), the cluster status moves to Active.

              \n

              If your cluster has managed node groups attached to it, all of your node groups’\n Kubernetes versions must match the cluster’s Kubernetes version in order to update the\n cluster to a new Kubernetes version.

              ", "smithy.api#http": { "method": "POST", "uri": "/clusters/{name}/updates", @@ -5124,7 +5157,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Updates an Amazon EKS managed node group configuration. Your node group continues to\n function during the update. The response output includes an update ID that you can use\n to track the status of your node group update with the DescribeUpdate\n API operation. Currently you can update the Kubernetes labels for a node group or the\n scaling configuration.

              ", + "smithy.api#documentation": "

              Updates an Amazon EKS managed node group configuration. Your node group\n continues to function during the update. The response output includes an update ID that\n you can use to track the status of your node group update with the DescribeUpdate API operation. Currently you can update the Kubernetes\n labels for a node group or the scaling configuration.

              ", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/node-groups/{nodegroupName}/update-config", @@ -5138,7 +5171,7 @@ "clusterName": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The name of the Amazon EKS cluster that the managed node group resides in.

              ", + "smithy.api#documentation": "

              The name of the Amazon EKS cluster that the managed node group resides\n in.

              ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5221,7 +5254,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Updates the Kubernetes version or AMI version of an Amazon EKS managed node group.

              \n

              You can update a node group using a launch template only if the node group was\n originally deployed with a launch template. If you need to update a custom AMI in a node\n group that was deployed with a launch template, then update your custom AMI, specify the\n new ID in a new version of the launch template, and then update the node group to the\n new version of the launch template.

              \n

              If you update without a launch template, then you can update to the latest available\n AMI version of a node group's current Kubernetes version by not specifying a Kubernetes\n version in the request. You can update to the latest AMI version of your cluster's\n current Kubernetes version by specifying your cluster's Kubernetes version in the\n request. For more information, see Amazon EKS\n optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.

              \n

              You cannot roll back a node group to an earlier Kubernetes version or AMI\n version.

              \n

              When a node in a managed node group is terminated due to a scaling action or update,\n the pods in that node are drained first. Amazon EKS attempts to drain the nodes gracefully\n and will fail if it is unable to do so. You can force the update if Amazon EKS\n is unable to drain the nodes as a result of a pod disruption budget issue.

              ", + "smithy.api#documentation": "

              Updates the Kubernetes version or AMI version of an Amazon EKS managed node\n group.

              \n

              You can update a node group using a launch template only if the node group was\n originally deployed with a launch template. If you need to update a custom AMI in a node\n group that was deployed with a launch template, then update your custom AMI, specify the\n new ID in a new version of the launch template, and then update the node group to the\n new version of the launch template.

              \n

              If you update without a launch template, then you can update to the latest available\n AMI version of a node group's current Kubernetes version by not specifying a Kubernetes\n version in the request. You can update to the latest AMI version of your cluster's\n current Kubernetes version by specifying your cluster's Kubernetes version in the\n request. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.

              \n

              You cannot roll back a node group to an earlier Kubernetes version or AMI\n version.

              \n

              When a node in a managed node group is terminated due to a scaling action or update,\n the pods in that node are drained first. Amazon EKS attempts to drain the nodes\n gracefully and will fail if it is unable to do so. You can force the update\n if Amazon EKS is unable to drain the nodes as a result of a pod disruption\n budget issue.

              ", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/node-groups/{nodegroupName}/update-version", @@ -5235,7 +5268,7 @@ "clusterName": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The name of the Amazon EKS cluster that is associated with the managed node group to\n update.

              ", + "smithy.api#documentation": "

              The name of the Amazon EKS cluster that is associated with the managed node\n group to update.

              ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5257,7 +5290,7 @@ "releaseVersion": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The AMI version of the Amazon EKS optimized AMI to use for the update. By default, the\n latest available AMI version for the node group's Kubernetes version is used. For more\n information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the\n Amazon EKS User Guide. If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify \n releaseVersion, or the node group update will fail.\n For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

              " + "smithy.api#documentation": "

              The AMI version of the Amazon EKS optimized AMI to use for the update. By\n default, the latest available AMI version for the node group's Kubernetes version is\n used. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.\n If you specify launchTemplate, and your launch template uses a custom AMI, then don't specify releaseVersion,\n or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.

              " } }, "launchTemplate": { @@ -5502,36 +5535,36 @@ "subnetIds": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

              Specify subnets for your Amazon EKS nodes. Amazon EKS creates cross-account elastic network\n interfaces in these subnets to allow communication between your nodes and the Kubernetes\n control plane.

              " + "smithy.api#documentation": "

              Specify subnets for your Amazon EKS nodes. Amazon EKS creates\n cross-account elastic network interfaces in these subnets to allow communication between\n your nodes and the Kubernetes control plane.

              " } }, "securityGroupIds": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

              Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use that allow communication between your nodes and the Kubernetes control plane. If you don't specify any security groups, then familiarize yourself with the difference between Amazon EKS defaults for clusters deployed with Kubernetes:
                • 1.14 Amazon EKS platform version eks.2 and earlier
                • 1.14 Amazon EKS platform version eks.3 and later
              For more information, see Amazon EKS security group considerations in the Amazon EKS User Guide.
              " + "smithy.api#documentation": "
              Specify one or more security groups for the cross-account elastic network interfaces that Amazon EKS creates to use that allow communication between your nodes and the Kubernetes control plane. If you don't specify any security groups, then familiarize yourself with the difference between Amazon EKS defaults for clusters deployed with Kubernetes:
                • 1.14 Amazon EKS platform version eks.2 and earlier
                • 1.14 Amazon EKS platform version eks.3 and later
              For more information, see Amazon EKS security group considerations in the Amazon EKS User Guide.

              " } }, "endpointPublicAccess": { "target": "com.amazonaws.eks#BoxedBoolean", "traits": { - "smithy.api#documentation": "

              Set this value to false to disable public access to your cluster's\n Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes\n API server can only receive requests from within the cluster VPC. The default value for\n this parameter is true, which enables public access for your Kubernetes API\n server. For more information, see Amazon EKS cluster\n endpoint access control in the \n Amazon EKS User Guide\n .

              " + "smithy.api#documentation": "

              Set this value to false to disable public access to your cluster's\n Kubernetes API server endpoint. If you disable public access, your cluster's Kubernetes\n API server can only receive requests from within the cluster VPC. The default value for\n this parameter is true, which enables public access for your Kubernetes API\n server. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

              " } }, "endpointPrivateAccess": { "target": "com.amazonaws.eks#BoxedBoolean", "traits": { - "smithy.api#documentation": "

              Set this value to true to enable private access for your cluster's\n Kubernetes API server endpoint. If you enable private access, Kubernetes API requests\n from within your cluster's VPC use the private VPC endpoint. The default value for this\n parameter is false, which disables private access for your Kubernetes API\n server. If you disable private access and you have nodes or Fargate pods in the\n cluster, then ensure that publicAccessCidrs includes the necessary CIDR\n blocks for communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster\n endpoint access control in the \n Amazon EKS User Guide\n .

              " + "smithy.api#documentation": "

              Set this value to true to enable private access for your cluster's\n Kubernetes API server endpoint. If you enable private access, Kubernetes API requests\n from within your cluster's VPC use the private VPC endpoint. The default value for this\n parameter is false, which disables private access for your Kubernetes API\n server. If you disable private access and you have nodes or Fargate\n pods in the cluster, then ensure that publicAccessCidrs includes the\n necessary CIDR blocks for communication with the nodes or Fargate pods.\n For more information, see Amazon EKS cluster endpoint access control in\n the \n Amazon EKS User Guide\n .

              " } }, "publicAccessCidrs": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

              The CIDR blocks that are allowed access to your cluster's public Kubernetes API server\n endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that\n you specify is denied. The default value is 0.0.0.0/0. If you've disabled\n private endpoint access and you have nodes or Fargate pods in the cluster, then ensure\n that you specify the necessary CIDR blocks. For more information, see Amazon EKS cluster\n endpoint access control in the \n Amazon EKS User Guide\n .

              " + "smithy.api#documentation": "

              The CIDR blocks that are allowed access to your cluster's public Kubernetes API server\n endpoint. Communication to the endpoint from addresses outside of the CIDR blocks that\n you specify is denied. The default value is 0.0.0.0/0. If you've disabled\n private endpoint access and you have nodes or Fargate pods in the\n cluster, then ensure that you specify the necessary CIDR blocks. For more information,\n see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

              " } } }, "traits": { - "smithy.api#documentation": "

              An object representing the VPC configuration to use for an Amazon EKS cluster.

              " + "smithy.api#documentation": "

              An object representing the VPC configuration to use for an Amazon EKS\n cluster.

              " } }, "com.amazonaws.eks#VpcConfigResponse": { @@ -5552,7 +5585,7 @@ "clusterSecurityGroupId": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "

              The cluster security group that was created by Amazon EKS for the cluster. Managed node\n groups use this security group for control-plane-to-data-plane communication.

              " + "smithy.api#documentation": "

              The cluster security group that was created by Amazon EKS for the cluster.\n Managed node groups use this security group for control-plane-to-data-plane\n communication.

              " } }, "vpcId": { @@ -5564,24 +5597,24 @@ "endpointPublicAccess": { "target": "com.amazonaws.eks#Boolean", "traits": { - "smithy.api#documentation": "

              This parameter indicates whether the Amazon EKS public API server endpoint is enabled. If\n the Amazon EKS public API server endpoint is disabled, your cluster's Kubernetes API server\n can only receive requests that originate from within the cluster VPC.

              " + "smithy.api#documentation": "

              This parameter indicates whether the Amazon EKS public API server endpoint is\n enabled. If the Amazon EKS public API server endpoint is disabled, your\n cluster's Kubernetes API server can only receive requests that originate from within the\n cluster VPC.

              " } }, "endpointPrivateAccess": { "target": "com.amazonaws.eks#Boolean", "traits": { - "smithy.api#documentation": "

              This parameter indicates whether the Amazon EKS private API server endpoint is enabled. If\n the Amazon EKS private API server endpoint is enabled, Kubernetes API requests that originate\n from within your cluster's VPC use the private VPC endpoint instead of traversing the\n internet. If this value is disabled and you have nodes or Fargate pods in the cluster,\n then ensure that publicAccessCidrs includes the necessary CIDR blocks for\n communication with the nodes or Fargate pods. For more information, see Amazon EKS cluster\n endpoint access control in the \n Amazon EKS User Guide\n .

              " + "smithy.api#documentation": "

              This parameter indicates whether the Amazon EKS private API server endpoint is\n enabled. If the Amazon EKS private API server endpoint is enabled, Kubernetes\n API requests that originate from within your cluster's VPC use the private VPC endpoint\n instead of traversing the internet. If this value is disabled and you have nodes or\n Fargate pods in the cluster, then ensure that\n publicAccessCidrs includes the necessary CIDR blocks for communication\n with the nodes or Fargate pods. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

              " } }, "publicAccessCidrs": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "

              The CIDR blocks that are allowed access to your cluster's public Kubernetes API server\n endpoint. Communication to the endpoint from addresses outside of the listed CIDR blocks\n is denied. The default value is 0.0.0.0/0. If you've disabled private\n endpoint access and you have nodes or Fargate pods in the cluster, then ensure that the\n necessary CIDR blocks are listed. For more information, see Amazon EKS cluster\n endpoint access control in the \n Amazon EKS User Guide\n .

              " + "smithy.api#documentation": "

              The CIDR blocks that are allowed access to your cluster's public Kubernetes API server\n endpoint. Communication to the endpoint from addresses outside of the listed CIDR blocks\n is denied. The default value is 0.0.0.0/0. If you've disabled private\n endpoint access and you have nodes or Fargate pods in the cluster,\n then ensure that the necessary CIDR blocks are listed. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .

              " } } }, "traits": { - "smithy.api#documentation": "

              An object representing an Amazon EKS cluster VPC configuration response.

              " + "smithy.api#documentation": "

              An object representing an Amazon EKS cluster VPC configuration\n response.

              " } }, "com.amazonaws.eks#ZeroCapacity": { diff --git a/codegen/sdk-codegen/aws-models/elasticsearch-service.json b/codegen/sdk-codegen/aws-models/elasticsearch-service.json index 2b57475a802e..96ea152b2eef 100644 --- a/codegen/sdk-codegen/aws-models/elasticsearch-service.json +++ b/codegen/sdk-codegen/aws-models/elasticsearch-service.json @@ -261,6 +261,18 @@ "traits": { "smithy.api#documentation": "

              Describes the SAML application configured for a domain.

              " } + }, + "AnonymousAuthDisableDate": { + "target": "com.amazonaws.elasticsearchservice#DisableTimestamp", + "traits": { + "smithy.api#documentation": "

              The date and time when anonymous auth is scheduled to be disabled. Present only when anonymous auth is enabled on the domain.

              " + } + }, + "AnonymousAuthEnabled": { + "target": "com.amazonaws.elasticsearchservice#Boolean", + "traits": { + "smithy.api#documentation": "

              True if Anonymous auth is enabled. Anonymous auth can be enabled only when AdvancedSecurity is enabled on existing domains.

              " + } } }, "traits": { @@ -293,6 +305,12 @@ "traits": { "smithy.api#documentation": "

              Specifies the SAML application configuration for the domain.

              " } + }, + "AnonymousAuthEnabled": { + "target": "com.amazonaws.elasticsearchservice#Boolean", + "traits": { + "smithy.api#documentation": "

              True if Anonymous auth is enabled. Anonymous auth can be enabled only when AdvancedSecurity is enabled on existing domains.

              " + } } }, "traits": { @@ -323,6 +341,24 @@ }, "com.amazonaws.elasticsearchservice#AmazonElasticsearchService2015": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Elasticsearch Service", + "arnNamespace": "es", + "cloudFormationName": "Elasticsearch", + "cloudTrailEventSource": "elasticsearchservice.amazonaws.com", + "endpointPrefix": "es" + }, + "aws.auth#sigv4": { + "name": "es" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "Amazon Elasticsearch Configuration Service\n

              Use the Amazon Elasticsearch Configuration API to create, configure, and manage Elasticsearch domains.

              \n

              For sample code that uses the Configuration API, see the Amazon Elasticsearch Service Developer Guide.\n The guide also contains sample code for sending signed HTTP requests to the Elasticsearch APIs.

              \n

              The endpoint for configuration service requests is region-specific: es.region.amazonaws.com.\n For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints,\n see Regions and Endpoints.

              ", + "smithy.api#title": "Amazon Elasticsearch Service", + "smithy.api#xmlNamespace": { + "uri": "http://es.amazonaws.com/doc/2015-01-01/" + } + }, "version": "2015-01-01", "operations": [ { @@ -445,25 +481,7 @@ { "target": "com.amazonaws.elasticsearchservice#UpgradeElasticsearchDomain" } - ], - "traits": { - "aws.api#service": { - "sdkId": "Elasticsearch Service", - "arnNamespace": "es", - "cloudFormationName": "Elasticsearch", - "cloudTrailEventSource": "elasticsearchservice.amazonaws.com", - "endpointPrefix": "es" - }, - "aws.auth#sigv4": { - "name": "es" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon Elasticsearch Configuration Service\n

              Use the Amazon Elasticsearch Configuration API to create, configure, and manage Elasticsearch domains.

              \n

              For sample code that uses the Configuration API, see the Amazon Elasticsearch Service Developer Guide.\n The guide also contains sample code for sending signed HTTP requests to the Elasticsearch APIs.

              \n

              The endpoint for configuration service requests is region-specific: es.region.amazonaws.com.\n For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints,\n see Regions and Endpoints.

              ", - "smithy.api#title": "Amazon Elasticsearch Service", - "smithy.api#xmlNamespace": { - "uri": "http://es.amazonaws.com/doc/2015-01-01/" - } - } + ] }, "com.amazonaws.elasticsearchservice#AssociatePackage": { "type": "operation", @@ -2508,6 +2526,9 @@ "smithy.api#documentation": "

              Container for results from DescribeReservedElasticsearchInstances

              " } }, + "com.amazonaws.elasticsearchservice#DisableTimestamp": { + "type": "timestamp" + }, "com.amazonaws.elasticsearchservice#DisabledOperationException": { "type": "structure", "members": { diff --git a/codegen/sdk-codegen/aws-models/glue.json b/codegen/sdk-codegen/aws-models/glue.json index b23731d2523d..18771f70c7ef 100644 --- a/codegen/sdk-codegen/aws-models/glue.json +++ b/codegen/sdk-codegen/aws-models/glue.json @@ -363,6 +363,15 @@ { "target": "com.amazonaws.glue#GetTriggers" }, + { + "target": "com.amazonaws.glue#GetUnfilteredPartitionMetadata" + }, + { + "target": "com.amazonaws.glue#GetUnfilteredPartitionsMetadata" + }, + { + "target": "com.amazonaws.glue#GetUnfilteredTableMetadata" + }, { "target": "com.amazonaws.glue#GetUserDefinedFunction" }, @@ -563,6 +572,15 @@ "smithy.api#error": "client" } }, + "com.amazonaws.glue#AccountId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 12 + } + } + }, "com.amazonaws.glue#Action": { "type": "structure", "members": { @@ -640,6 +658,23 @@ "com.amazonaws.glue#AttemptCount": { "type": "integer" }, + "com.amazonaws.glue#AuditContext": { + "type": "structure", + "members": { + "AdditionalAuditContext": { + "target": "com.amazonaws.glue#AuditContextString" + } + } + }, + "com.amazonaws.glue#AuditContextString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, "com.amazonaws.glue#BackfillError": { "type": "structure", "members": { @@ -2302,6 +2337,12 @@ "smithy.api#documentation": "

              A list of the tables to be synchronized.

              ", "smithy.api#required": {} } + }, + "ConnectionName": { + "target": "com.amazonaws.glue#ConnectionName", + "traits": { + "smithy.api#documentation": "

              The name of the connection for an Amazon S3-backed Data Catalog table to be a target of the crawl, when a Catalog connection type is paired with a NETWORK connection type.

              " + } } }, "traits": { @@ -2684,6 +2725,23 @@ "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" } }, + "com.amazonaws.glue#ColumnRowFilter": { + "type": "structure", + "members": { + "ColumnName": { + "target": "com.amazonaws.glue#NameString" + }, + "RowFilterExpression": { + "target": "com.amazonaws.glue#PredicateString" + } + } + }, + "com.amazonaws.glue#ColumnRowFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#ColumnRowFilter" + } + }, "com.amazonaws.glue#ColumnStatistics": { "type": "structure", "members": { @@ -3598,6 +3656,9 @@ "traits": { "smithy.api#documentation": "

              The name of the SecurityConfiguration structure to be used by this\n crawler.

              " } + }, + "LakeFormationConfiguration": { + "target": "com.amazonaws.glue#LakeFormationConfiguration" } }, "traits": { @@ -3821,6 +3882,12 @@ "traits": { "smithy.api#documentation": "

              Specifies Glue Data Catalog targets.

              " } + }, + "DeltaTargets": { + "target": "com.amazonaws.glue#DeltaTargetList", + "traits": { + "smithy.api#documentation": "

              Specifies Delta data store targets.

              " + } } }, "traits": { @@ -4109,6 +4176,9 @@ "smithy.api#documentation": "

              Specifies data lineage configuration settings for the crawler.

              " } }, + "LakeFormationConfiguration": { + "target": "com.amazonaws.glue#LakeFormationConfiguration" + }, "Configuration": { "target": "com.amazonaws.glue#CrawlerConfiguration", "traits": { @@ -7373,6 +7443,38 @@ } } }, + "com.amazonaws.glue#DeltaTarget": { + "type": "structure", + "members": { + "DeltaTables": { + "target": "com.amazonaws.glue#PathList", + "traits": { + "smithy.api#documentation": "

              A list of the Amazon S3 paths to the Delta tables.

              " + } + }, + "ConnectionName": { + "target": "com.amazonaws.glue#ConnectionName", + "traits": { + "smithy.api#documentation": "

              The name of the connection to use to connect to the Delta table target.

              " + } + }, + "WriteManifest": { + "target": "com.amazonaws.glue#NullableBoolean", + "traits": { + "smithy.api#documentation": "

              Specifies whether to write the manifest files to the Delta table path.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              Specifies a Delta data store to crawl one or more Delta tables.

              " + } + }, + "com.amazonaws.glue#DeltaTargetList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#DeltaTarget" + } + }, "com.amazonaws.glue#DescriptionString": { "type": "string", "traits": { @@ -11619,6 +11721,256 @@ } } }, + "com.amazonaws.glue#GetUnfilteredPartitionMetadata": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#GetUnfilteredPartitionMetadataRequest" + }, + "output": { + "target": "com.amazonaws.glue#GetUnfilteredPartitionMetadataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#GlueEncryptionException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + }, + { + "target": "com.amazonaws.glue#PermissionTypeMismatchException" + } + ] + }, + "com.amazonaws.glue#GetUnfilteredPartitionMetadataRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.glue#CatalogIdString", + "traits": { + "smithy.api#required": {} + } + }, + "DatabaseName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#required": {} + } + }, + "TableName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#required": {} + } + }, + "PartitionValues": { + "target": "com.amazonaws.glue#ValueStringList", + "traits": { + "smithy.api#required": {} + } + }, + "AuditContext": { + "target": "com.amazonaws.glue#AuditContext" + }, + "SupportedPermissionTypes": { + "target": "com.amazonaws.glue#PermissionTypeList", + "traits": { + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.glue#GetUnfilteredPartitionMetadataResponse": { + "type": "structure", + "members": { + "Partition": { + "target": "com.amazonaws.glue#Partition" + }, + "AuthorizedColumns": { + "target": "com.amazonaws.glue#NameStringList" + }, + "IsRegisteredWithLakeFormation": { + "target": "com.amazonaws.glue#Boolean" + } + } + }, + "com.amazonaws.glue#GetUnfilteredPartitionsMetadata": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#GetUnfilteredPartitionsMetadataRequest" + }, + "output": { + "target": "com.amazonaws.glue#GetUnfilteredPartitionsMetadataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#GlueEncryptionException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + }, + { + "target": "com.amazonaws.glue#PermissionTypeMismatchException" + } + ], + "traits": { + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.glue#GetUnfilteredPartitionsMetadataRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.glue#CatalogIdString", + "traits": { + "smithy.api#required": {} + } + }, + "DatabaseName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#required": {} + } + }, + "TableName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#required": {} + } + }, + "Expression": { + "target": "com.amazonaws.glue#PredicateString" + }, + "AuditContext": { + "target": "com.amazonaws.glue#AuditContext" + }, + "SupportedPermissionTypes": { + "target": 
"com.amazonaws.glue#PermissionTypeList", + "traits": { + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.glue#Token" + }, + "Segment": { + "target": "com.amazonaws.glue#Segment" + }, + "MaxResults": { + "target": "com.amazonaws.glue#PageSize" + } + } + }, + "com.amazonaws.glue#GetUnfilteredPartitionsMetadataResponse": { + "type": "structure", + "members": { + "UnfilteredPartitions": { + "target": "com.amazonaws.glue#UnfilteredPartitionList" + }, + "NextToken": { + "target": "com.amazonaws.glue#Token" + } + } + }, + "com.amazonaws.glue#GetUnfilteredTableMetadata": { + "type": "operation", + "input": { + "target": "com.amazonaws.glue#GetUnfilteredTableMetadataRequest" + }, + "output": { + "target": "com.amazonaws.glue#GetUnfilteredTableMetadataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.glue#EntityNotFoundException" + }, + { + "target": "com.amazonaws.glue#GlueEncryptionException" + }, + { + "target": "com.amazonaws.glue#InternalServiceException" + }, + { + "target": "com.amazonaws.glue#InvalidInputException" + }, + { + "target": "com.amazonaws.glue#OperationTimeoutException" + }, + { + "target": "com.amazonaws.glue#PermissionTypeMismatchException" + } + ] + }, + "com.amazonaws.glue#GetUnfilteredTableMetadataRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.glue#CatalogIdString", + "traits": { + "smithy.api#required": {} + } + }, + "DatabaseName": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.glue#NameString", + "traits": { + "smithy.api#required": {} + } + }, + "AuditContext": { + "target": "com.amazonaws.glue#AuditContext" + }, + "SupportedPermissionTypes": { + "target": "com.amazonaws.glue#PermissionTypeList", + "traits": { + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.glue#GetUnfilteredTableMetadataResponse": { + "type": "structure", + "members": { + "Table": { + "target": "com.amazonaws.glue#Table" + }, + "AuthorizedColumns": { + "target": "com.amazonaws.glue#NameStringList" + }, + "IsRegisteredWithLakeFormation": { + "target": "com.amazonaws.glue#Boolean" + }, + "CellFilters": { + "target": "com.amazonaws.glue#ColumnRowFilterList" + } + } + }, "com.amazonaws.glue#GetUserDefinedFunction": { "type": "operation", "input": { @@ -13100,6 +13452,17 @@ "smithy.api#documentation": "

              Specifies configuration properties for a labeling set generation task run.

              " } }, + "com.amazonaws.glue#LakeFormationConfiguration": { + "type": "structure", + "members": { + "UseLakeFormationCredentials": { + "target": "com.amazonaws.glue#NullableBoolean" + }, + "AccountId": { + "target": "com.amazonaws.glue#AccountId" + } + } + }, "com.amazonaws.glue#Language": { "type": "string", "traits": { @@ -13981,6 +14344,12 @@ "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" } }, + "com.amazonaws.glue#LocationStringList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#LocationString" + } + }, "com.amazonaws.glue#LogGroup": { "type": "string", "traits": { @@ -15081,6 +15450,44 @@ "target": "com.amazonaws.glue#Permission" } }, + "com.amazonaws.glue#PermissionType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "COLUMN_PERMISSION", + "name": "COLUMN_PERMISSION" + }, + { + "value": "CELL_FILTER_PERMISSION", + "name": "CELL_FILTER_PERMISSION" + } + ] + } + }, + "com.amazonaws.glue#PermissionTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#PermissionType" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.glue#PermissionTypeMismatchException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.glue#MessageString" + } + }, + "traits": { + "smithy.api#error": "client" + } + }, "com.amazonaws.glue#PhysicalConnectionRequirements": { "type": "structure", "members": { @@ -17860,6 +18267,9 @@ "smithy.api#documentation": "

              The physical location of the table. By default, this takes the form of the warehouse\n location, followed by the database location in the warehouse, followed by the table\n name.

              " } }, + "AdditionalLocations": { + "target": "com.amazonaws.glue#LocationStringList" + }, "InputFormat": { "target": "com.amazonaws.glue#FormatString", "traits": { @@ -19118,6 +19528,26 @@ "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" } }, + "com.amazonaws.glue#UnfilteredPartition": { + "type": "structure", + "members": { + "Partition": { + "target": "com.amazonaws.glue#Partition" + }, + "AuthorizedColumns": { + "target": "com.amazonaws.glue#NameStringList" + }, + "IsRegisteredWithLakeFormation": { + "target": "com.amazonaws.glue#Boolean" + } + } + }, + "com.amazonaws.glue#UnfilteredPartitionList": { + "type": "list", + "member": { + "target": "com.amazonaws.glue#UnfilteredPartition" + } + }, "com.amazonaws.glue#UntagResource": { "type": "operation", "input": { @@ -19626,6 +20056,9 @@ "smithy.api#documentation": "

              Specifies data lineage configuration settings for the crawler.

              " } }, + "LakeFormationConfiguration": { + "target": "com.amazonaws.glue#LakeFormationConfiguration" + }, "Configuration": { "target": "com.amazonaws.glue#CrawlerConfiguration", "traits": { diff --git a/codegen/sdk-codegen/aws-models/greengrassv2.json b/codegen/sdk-codegen/aws-models/greengrassv2.json index 7c6138919954..7f9099cf860f 100644 --- a/codegen/sdk-codegen/aws-models/greengrassv2.json +++ b/codegen/sdk-codegen/aws-models/greengrassv2.json @@ -110,6 +110,56 @@ } } }, + "com.amazonaws.greengrassv2#AssociateServiceRoleToAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.greengrassv2#AssociateServiceRoleToAccountRequest" + }, + "output": { + "target": "com.amazonaws.greengrassv2#AssociateServiceRoleToAccountResponse" + }, + "errors": [ + { + "target": "com.amazonaws.greengrassv2#InternalServerException" + }, + { + "target": "com.amazonaws.greengrassv2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Associates a Greengrass service role with IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region. IoT Greengrass\n uses this role to verify the identity of client devices and manage core device connectivity\n information. The role must include the AWSGreengrassResourceAccessRolePolicy managed policy or a custom policy that\n defines equivalent permissions for the IoT Greengrass features that you use. For more information, see\n Greengrass service role in the IoT Greengrass Version 2 Developer Guide.

              ", + "smithy.api#http": { + "method": "PUT", + "uri": "/greengrass/servicerole", + "code": 200 + } + } + }, + "com.amazonaws.greengrassv2#AssociateServiceRoleToAccountRequest": { + "type": "structure", + "members": { + "roleArn": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the service role to associate with IoT Greengrass for your\n Amazon Web Services account in this Amazon Web Services Region.

              ", + "smithy.api#jsonName": "RoleArn", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.greengrassv2#AssociateServiceRoleToAccountResponse": { + "type": "structure", + "members": { + "associatedAt": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              The time when the service role was associated with IoT Greengrass for your Amazon Web Services account in this\n Amazon Web Services Region.

              ", + "smithy.api#jsonName": "AssociatedAt" + } + } + } + }, "com.amazonaws.greengrassv2#AssociatedClientDevice": { "type": "structure", "members": { @@ -168,7 +218,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Associate a list of client devices with a core device. Use this API operation to specify\n which client devices can discover a core device through cloud discovery. With cloud discovery,\n client devices connect to IoT Greengrass to retrieve associated core devices' connectivity information\n and certificates. For more information, see Configure cloud\n discovery in the IoT Greengrass V2 Developer Guide.

              \n \n \n

              Client devices are local IoT devices that connect to and communicate with an IoT Greengrass core\n device over MQTT. You can connect client devices to a core device to sync MQTT messages and\n data to Amazon Web Services IoT Core and interact with client devices in Greengrass components. For more information,\n see Interact with\n local IoT devices in the IoT Greengrass V2 Developer Guide.

              \n
              ", + "smithy.api#documentation": "

              Associates a list of client devices with a core device. Use this API operation to specify\n which client devices can discover a core device through cloud discovery. With cloud discovery,\n client devices connect to IoT Greengrass to retrieve associated core devices' connectivity information\n and certificates. For more information, see Configure cloud\n discovery in the IoT Greengrass V2 Developer Guide.

              \n \n \n

              Client devices are local IoT devices that connect to and communicate with an IoT Greengrass core\n device over MQTT. You can connect client devices to a core device to sync MQTT messages and\n data to Amazon Web Services IoT Core and interact with client devices in Greengrass components. For more information,\n see Interact with\n local IoT devices in the IoT Greengrass V2 Developer Guide.

              \n
              ", "smithy.api#http": { "method": "POST", "uri": "/greengrass/v2/coreDevices/{coreDeviceThingName}/associateClientDevices", @@ -232,7 +282,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Disassociate a list of client devices from a core device. After you disassociate a client\n device from a core device, the client device won't be able to use cloud discovery to retrieve\n the core device's connectivity information and certificates.

              ", + "smithy.api#documentation": "

              Disassociates a list of client devices from a core device. After you disassociate a client\n device from a core device, the client device won't be able to use cloud discovery to retrieve\n the core device's connectivity information and certificates.

              ", "smithy.api#http": { "method": "POST", "uri": "/greengrass/v2/coreDevices/{coreDeviceThingName}/disassociateClientDevices", @@ -265,7 +315,7 @@ "errorEntries": { "target": "com.amazonaws.greengrassv2#DisassociateClientDeviceFromCoreDeviceErrorList", "traits": { - "smithy.api#documentation": "

              The list of errors (if any) for the entries in the request. Each error entry contains the\n name of the IoT thing that failed to disassociate.

              " + "smithy.api#documentation": "

              The list of any errors for the entries in the request. Each error entry contains the name\n of the IoT thing that failed to disassociate.

              " } } } @@ -803,6 +853,42 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.greengrassv2#ConnectivityInfo": { + "type": "structure", + "members": { + "id": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              An ID for the connectivity information.

              ", + "smithy.api#jsonName": "Id" + } + }, + "hostAddress": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              The IP address or DNS address where client devices can connect to an MQTT broker on the\n Greengrass core device.

              ", + "smithy.api#jsonName": "HostAddress" + } + }, + "portNumber": { + "target": "com.amazonaws.greengrassv2#PortNumberInt", + "traits": { + "smithy.api#documentation": "

              The port where the MQTT broker operates on the core device. This port is typically 8883,\n which is the default port for the MQTT broker component that runs on core devices.

              ", + "smithy.api#jsonName": "PortNumber" + } + }, + "metadata": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              Additional metadata to provide to client devices that connect to this core device.

              ", + "smithy.api#jsonName": "Metadata" + } + } + }, + "traits": { + "smithy.api#documentation": "

              Contains information about an endpoint and port where client devices can connect to an\n MQTT broker on a Greengrass core device.

              " + } + }, "com.amazonaws.greengrassv2#CoreDevice": { "type": "structure", "members": { @@ -815,7 +901,7 @@ "status": { "target": "com.amazonaws.greengrassv2#CoreDeviceStatus", "traits": { - "smithy.api#documentation": "

              The status of the core device. Core devices can have the following statuses:
                • HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue.
                • UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device.
              " + "smithy.api#documentation": "
              The status of the core device. Core devices can have the following statuses:
                • HEALTHY – The IoT Greengrass Core software and all components run on the core device without issue.
                • UNHEALTHY – The IoT Greengrass Core software or a component is in a failed state on the core device.
              " } }, "lastStatusUpdateTimestamp": { @@ -1603,6 +1689,44 @@ } } }, + "com.amazonaws.greengrassv2#DisassociateServiceRoleFromAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.greengrassv2#DisassociateServiceRoleFromAccountRequest" + }, + "output": { + "target": "com.amazonaws.greengrassv2#DisassociateServiceRoleFromAccountResponse" + }, + "errors": [ + { + "target": "com.amazonaws.greengrassv2#InternalServerException" + } + ], + "traits": { + "smithy.api#documentation": "

              Disassociates the Greengrass service role from IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region.\n Without a service role, IoT Greengrass can't verify the identity of client devices or manage core device\n connectivity information. For more information, see Greengrass service role in\n the IoT Greengrass Version 2 Developer Guide.

              ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/greengrass/servicerole", + "code": 200 + } + } + }, + "com.amazonaws.greengrassv2#DisassociateServiceRoleFromAccountRequest": { + "type": "structure", + "members": {} + }, + "com.amazonaws.greengrassv2#DisassociateServiceRoleFromAccountResponse": { + "type": "structure", + "members": { + "disassociatedAt": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              The time when the service role was disassociated from IoT Greengrass for your Amazon Web Services account in this\n Amazon Web Services Region.

              ", + "smithy.api#jsonName": "DisassociatedAt" + } + } + } + }, "com.amazonaws.greengrassv2#EffectiveDeployment": { "type": "structure", "members": { @@ -1882,6 +2006,63 @@ } } }, + "com.amazonaws.greengrassv2#GetConnectivityInfo": { + "type": "operation", + "input": { + "target": "com.amazonaws.greengrassv2#GetConnectivityInfoRequest" + }, + "output": { + "target": "com.amazonaws.greengrassv2#GetConnectivityInfoResponse" + }, + "errors": [ + { + "target": "com.amazonaws.greengrassv2#InternalServerException" + }, + { + "target": "com.amazonaws.greengrassv2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Retrieves connectivity information for a Greengrass core device.

              \n

              Connectivity information includes endpoints and ports where client devices\n can connect to an MQTT broker on the core device. When a client device\n calls the Greengrass discovery API,\n IoT Greengrass returns connectivity information for all of the core devices where the client device can\n connect. For more information, see Connect client devices to\n core devices in the IoT Greengrass Version 2 Developer Guide.

              ", + "smithy.api#http": { + "method": "GET", + "uri": "/greengrass/things/{thingName}/connectivityInfo", + "code": 200 + } + } + }, + "com.amazonaws.greengrassv2#GetConnectivityInfoRequest": { + "type": "structure", + "members": { + "thingName": { + "target": "com.amazonaws.greengrassv2#CoreDeviceThingName", + "traits": { + "smithy.api#documentation": "

              The name of the core device. This is also the name of the IoT thing.

              ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.greengrassv2#GetConnectivityInfoResponse": { + "type": "structure", + "members": { + "connectivityInfo": { + "target": "com.amazonaws.greengrassv2#connectivityInfoList", + "traits": { + "smithy.api#documentation": "

              The connectivity information for the core device.

              ", + "smithy.api#jsonName": "ConnectivityInfo" + } + }, + "message": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              A message about the connectivity information request.

              ", + "smithy.api#jsonName": "Message" + } + } + } + }, "com.amazonaws.greengrassv2#GetCoreDevice": { "type": "operation", "input": { @@ -2106,6 +2287,51 @@ } } }, + "com.amazonaws.greengrassv2#GetServiceRoleForAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.greengrassv2#GetServiceRoleForAccountRequest" + }, + "output": { + "target": "com.amazonaws.greengrassv2#GetServiceRoleForAccountResponse" + }, + "errors": [ + { + "target": "com.amazonaws.greengrassv2#InternalServerException" + } + ], + "traits": { + "smithy.api#documentation": "

              Gets the service role associated with IoT Greengrass for your Amazon Web Services account in this Amazon Web Services Region.\n IoT Greengrass uses this role to verify the identity of client devices and manage core device\n connectivity information. For more information, see Greengrass service role in\n the IoT Greengrass Version 2 Developer Guide.

              ", + "smithy.api#http": { + "method": "GET", + "uri": "/greengrass/servicerole", + "code": 200 + } + } + }, + "com.amazonaws.greengrassv2#GetServiceRoleForAccountRequest": { + "type": "structure", + "members": {} + }, + "com.amazonaws.greengrassv2#GetServiceRoleForAccountResponse": { + "type": "structure", + "members": { + "associatedAt": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              The time when the service role was associated with IoT Greengrass for your Amazon Web Services account in this\n Amazon Web Services Region.

              ", + "smithy.api#jsonName": "AssociatedAt" + } + }, + "roleArn": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              The ARN of the service role that is associated with IoT Greengrass for your Amazon Web Services account in this\n Amazon Web Services Region.

              ", + "smithy.api#jsonName": "RoleArn" + } + } + } + }, "com.amazonaws.greengrassv2#GreengrassV2": { "type": "service", "traits": { @@ -2125,6 +2351,9 @@ }, "version": "2020-11-30", "operations": [ + { + "target": "com.amazonaws.greengrassv2#AssociateServiceRoleToAccount" + }, { "target": "com.amazonaws.greengrassv2#BatchAssociateClientDeviceWithCoreDevice" }, @@ -2149,18 +2378,27 @@ { "target": "com.amazonaws.greengrassv2#DescribeComponent" }, + { + "target": "com.amazonaws.greengrassv2#DisassociateServiceRoleFromAccount" + }, { "target": "com.amazonaws.greengrassv2#GetComponent" }, { "target": "com.amazonaws.greengrassv2#GetComponentVersionArtifact" }, + { + "target": "com.amazonaws.greengrassv2#GetConnectivityInfo" + }, { "target": "com.amazonaws.greengrassv2#GetCoreDevice" }, { "target": "com.amazonaws.greengrassv2#GetDeployment" }, + { + "target": "com.amazonaws.greengrassv2#GetServiceRoleForAccount" + }, { "target": "com.amazonaws.greengrassv2#ListClientDevicesAssociatedWithCoreDevice" }, @@ -2193,6 +2431,9 @@ }, { "target": "com.amazonaws.greengrassv2#UntagResource" + }, + { + "target": "com.amazonaws.greengrassv2#UpdateConnectivityInfo" } ] }, @@ -3624,6 +3865,15 @@ "target": "com.amazonaws.greengrassv2#NonEmptyString" } }, + "com.amazonaws.greengrassv2#PortNumberInt": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 65535 + } + } + }, "com.amazonaws.greengrassv2#PublisherString": { "type": "string" }, @@ -4080,6 +4330,72 @@ "type": "structure", "members": {} }, + "com.amazonaws.greengrassv2#UpdateConnectivityInfo": { + "type": "operation", + "input": { + "target": "com.amazonaws.greengrassv2#UpdateConnectivityInfoRequest" + }, + "output": { + "target": "com.amazonaws.greengrassv2#UpdateConnectivityInfoResponse" + }, + "errors": [ + { + "target": "com.amazonaws.greengrassv2#InternalServerException" + }, + { + "target": "com.amazonaws.greengrassv2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

              Updates connectivity information for a Greengrass core device.

              \n

              Connectivity information includes endpoints and ports where client devices\n can connect to an MQTT broker on the core device. When a client device\n calls the Greengrass discovery API,\n IoT Greengrass returns connectivity information for all of the core devices where the client device can\n connect. For more information, see Connect client devices to\n core devices in the IoT Greengrass Version 2 Developer Guide.

              ", + "smithy.api#http": { + "method": "PUT", + "uri": "/greengrass/things/{thingName}/connectivityInfo", + "code": 200 + } + } + }, + "com.amazonaws.greengrassv2#UpdateConnectivityInfoRequest": { + "type": "structure", + "members": { + "thingName": { + "target": "com.amazonaws.greengrassv2#CoreDeviceThingName", + "traits": { + "smithy.api#documentation": "

              The name of the core device. This is also the name of the IoT thing.

              ", + "smithy.api#httpLabel": {}, + "smithy.api#jsonName": "ThingName", + "smithy.api#required": {} + } + }, + "connectivityInfo": { + "target": "com.amazonaws.greengrassv2#connectivityInfoList", + "traits": { + "smithy.api#documentation": "

              The connectivity information for the core device.

              ", + "smithy.api#jsonName": "ConnectivityInfo", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.greengrassv2#UpdateConnectivityInfoResponse": { + "type": "structure", + "members": { + "version": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              The new version of the connectivity information for the core device.

              ", + "smithy.api#jsonName": "Version" + } + }, + "message": { + "target": "com.amazonaws.greengrassv2#String", + "traits": { + "smithy.api#documentation": "

              A message about the connectivity information update request.

              ", + "smithy.api#jsonName": "Message" + } + } + } + }, "com.amazonaws.greengrassv2#ValidationException": { "type": "structure", "members": { @@ -4158,6 +4474,12 @@ } ] } + }, + "com.amazonaws.greengrassv2#connectivityInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.greengrassv2#ConnectivityInfo" + } } } } diff --git a/codegen/sdk-codegen/aws-models/iot-wireless.json b/codegen/sdk-codegen/aws-models/iot-wireless.json index f6e3ec37c025..4719a9387e2a 100644 --- a/codegen/sdk-codegen/aws-models/iot-wireless.json +++ b/codegen/sdk-codegen/aws-models/iot-wireless.json @@ -1793,6 +1793,72 @@ "type": "structure", "members": {} }, + "com.amazonaws.iotwireless#DeleteQueuedMessages": { + "type": "operation", + "input": { + "target": "com.amazonaws.iotwireless#DeleteQueuedMessagesRequest" + }, + "output": { + "target": "com.amazonaws.iotwireless#DeleteQueuedMessagesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iotwireless#AccessDeniedException" + }, + { + "target": "com.amazonaws.iotwireless#InternalServerException" + }, + { + "target": "com.amazonaws.iotwireless#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iotwireless#ThrottlingException" + }, + { + "target": "com.amazonaws.iotwireless#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

              The operation to delete queued downlink messages for a wireless device.

              ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/wireless-devices/{Id}/data", + "code": 204 + } + } + }, + "com.amazonaws.iotwireless#DeleteQueuedMessagesRequest": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.iotwireless#WirelessDeviceId", + "traits": { + "smithy.api#documentation": "

              The ID of the wireless device whose messages will be deleted.

              ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "MessageId": { + "target": "com.amazonaws.iotwireless#MessageId", + "traits": { + "smithy.api#documentation": "

              If messageId is \"*\", the entire downlink queue for the specified wireless device is purged. Otherwise, only the message with the specified messageId is deleted.
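              A minimal sketch of the messageId semantics described above, assuming the regenerated @aws-sdk/client-iot-wireless exposes a DeleteQueuedMessagesCommand for this operation; the device ID is a placeholder.

```ts
import { IoTWirelessClient, DeleteQueuedMessagesCommand } from "@aws-sdk/client-iot-wireless";

const client = new IoTWirelessClient({ region: "us-east-1" });

async function purgeDownlinkQueue(wirelessDeviceId: string): Promise<void> {
  // MessageId "*" purges the device's entire downlink queue; pass a specific
  // message ID instead to delete only that message.
  await client.send(
    new DeleteQueuedMessagesCommand({
      Id: wirelessDeviceId,
      MessageId: "*",
      WirelessDeviceType: "LoRaWAN", // optional; "Sidewalk" or "LoRaWAN"
    })
  );
}

purgeDownlinkQueue("placeholder-wireless-device-id").catch(console.error);
```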

              ", + "smithy.api#httpQuery": "messageId", + "smithy.api#required": {} + } + }, + "WirelessDeviceType": { + "target": "com.amazonaws.iotwireless#WirelessDeviceType", + "traits": { + "smithy.api#documentation": "

              The wireless device type, which is either Sidewalk or LoRaWAN.

              ", + "smithy.api#httpQuery": "WirelessDeviceType" + } + } + } + }, + "com.amazonaws.iotwireless#DeleteQueuedMessagesResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.iotwireless#DeleteServiceProfile": { "type": "operation", "input": { @@ -2712,6 +2778,41 @@ "smithy.api#box": {} } }, + "com.amazonaws.iotwireless#DownlinkQueueMessage": { + "type": "structure", + "members": { + "MessageId": { + "target": "com.amazonaws.iotwireless#MessageId", + "traits": { + "smithy.api#documentation": "

              The messageId allocated by IoT Wireless for tracing purposes.

              " + } + }, + "TransmitMode": { + "target": "com.amazonaws.iotwireless#TransmitMode", + "traits": { + "smithy.api#documentation": "

              The transmit mode to use to send data to the wireless device. Can be: 0 for UM (unacknowledged mode) or 1 for AM (acknowledged mode).

              " + } + }, + "ReceivedAt": { + "target": "com.amazonaws.iotwireless#ISODateTimeString", + "traits": { + "smithy.api#documentation": "

              The timestamp at which IoT Wireless received the message.

              " + } + }, + "LoRaWAN": { + "target": "com.amazonaws.iotwireless#LoRaWANSendDataToDevice" + } + }, + "traits": { + "smithy.api#documentation": "

              The message in the downlink queue.

              " + } + }, + "com.amazonaws.iotwireless#DownlinkQueueMessagesList": { + "type": "list", + "member": { + "target": "com.amazonaws.iotwireless#DownlinkQueueMessage" + } + }, "com.amazonaws.iotwireless#DrMax": { "type": "integer", "traits": { @@ -5042,6 +5143,96 @@ } } }, + "com.amazonaws.iotwireless#ListQueuedMessages": { + "type": "operation", + "input": { + "target": "com.amazonaws.iotwireless#ListQueuedMessagesRequest" + }, + "output": { + "target": "com.amazonaws.iotwireless#ListQueuedMessagesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iotwireless#AccessDeniedException" + }, + { + "target": "com.amazonaws.iotwireless#InternalServerException" + }, + { + "target": "com.amazonaws.iotwireless#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iotwireless#ThrottlingException" + }, + { + "target": "com.amazonaws.iotwireless#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

              The operation to list queued downlink messages for a wireless device.
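              A sketch of paging through the new ListQueuedMessages operation with the NextToken and MaxResults members defined below, assuming the regenerated @aws-sdk/client-iot-wireless exports the command and the DownlinkQueueMessage model type.

```ts
import {
  IoTWirelessClient,
  ListQueuedMessagesCommand,
  DownlinkQueueMessage,
} from "@aws-sdk/client-iot-wireless";

const client = new IoTWirelessClient({ region: "us-east-1" });

async function listAllQueuedMessages(wirelessDeviceId: string): Promise<DownlinkQueueMessage[]> {
  const messages: DownlinkQueueMessage[] = [];
  let nextToken: string | undefined;
  do {
    // Each page returns up to MaxResults entries plus a NextToken when more remain.
    const page = await client.send(
      new ListQueuedMessagesCommand({
        Id: wirelessDeviceId,
        MaxResults: 50,
        NextToken: nextToken,
      })
    );
    messages.push(...(page.DownlinkQueueMessagesList ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return messages;
}
```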

              ", + "smithy.api#http": { + "method": "GET", + "uri": "/wireless-devices/{Id}/data", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.iotwireless#ListQueuedMessagesRequest": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.iotwireless#WirelessDeviceId", + "traits": { + "smithy.api#documentation": "

              The ID of the wireless device that the downlink packets are targeted to.

              ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.iotwireless#NextToken", + "traits": { + "smithy.api#documentation": "

              To retrieve the next set of results, use the nextToken value from a previous response; otherwise, pass null to receive the first set of results.

              ", + "smithy.api#httpQuery": "nextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.iotwireless#MaxResults", + "traits": { + "smithy.api#documentation": "

              The maximum number of results to return in this operation.

              ", + "smithy.api#httpQuery": "maxResults" + } + }, + "WirelessDeviceType": { + "target": "com.amazonaws.iotwireless#WirelessDeviceType", + "traits": { + "smithy.api#documentation": "

              The wireless device type, which is either Sidewalk or LoRaWAN.

              ", + "smithy.api#httpQuery": "WirelessDeviceType" + } + } + } + }, + "com.amazonaws.iotwireless#ListQueuedMessagesResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.iotwireless#NextToken", + "traits": { + "smithy.api#documentation": "

              The token to use to retrieve the next set of results, or null if there are no additional results.

              " + } + }, + "DownlinkQueueMessagesList": { + "target": "com.amazonaws.iotwireless#DownlinkQueueMessagesList", + "traits": { + "smithy.api#documentation": "

              The messages in the downlink queue.

              " + } + } + } + }, "com.amazonaws.iotwireless#ListServiceProfiles": { "type": "operation", "input": { @@ -9268,7 +9459,7 @@ "name": "iotwireless" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

              AWS IoT Wireless API documentation

              ", + "smithy.api#documentation": "

              AWS IoT Wireless provides bi-directional communication between internet-connected wireless\n\t devices and the AWS Cloud. To onboard both LoRaWAN and Sidewalk devices to AWS IoT, use the \n\t IoT Wireless API. These wireless devices use the Low Power Wide Area Networking (LPWAN) \n\t communication protocol to communicate with AWS IoT.

              \n\t

              Using the API, you can perform create, read, update, and delete operations for your wireless\n\t devices, gateways, destinations, and profiles. After onboarding your devices, you \n\t can use the API operations to set log levels and monitor your devices with CloudWatch.

              \n\t

              You can also use the API operations to create multicast groups and schedule a multicast session for\n\t sending a downlink message to devices in the group. By using Firmware Updates Over-The-Air\n\t (FUOTA) API operations, you can create a FUOTA task and schedule a session to update the firmware\n\t of individual devices or an entire group of devices in a multicast group.

              ", "smithy.api#title": "AWS IoT Wireless" }, "version": "2020-11-22", @@ -9336,6 +9527,9 @@ { "target": "com.amazonaws.iotwireless#DeleteMulticastGroup" }, + { + "target": "com.amazonaws.iotwireless#DeleteQueuedMessages" + }, { "target": "com.amazonaws.iotwireless#DeleteServiceProfile" }, @@ -9450,6 +9644,9 @@ { "target": "com.amazonaws.iotwireless#ListPartnerAccounts" }, + { + "target": "com.amazonaws.iotwireless#ListQueuedMessages" + }, { "target": "com.amazonaws.iotwireless#ListServiceProfiles" }, diff --git a/codegen/sdk-codegen/aws-models/iot.json b/codegen/sdk-codegen/aws-models/iot.json index a50863ac8b16..bacf1d1baff0 100644 --- a/codegen/sdk-codegen/aws-models/iot.json +++ b/codegen/sdk-codegen/aws-models/iot.json @@ -5817,7 +5817,7 @@ "abortConfig": { "target": "com.amazonaws.iot#AbortConfig", "traits": { - "smithy.api#documentation": "

              Allows you to create criteria to abort a job.

              " + "smithy.api#documentation": "

              Allows you to create the criteria to abort a job.

              " } }, "timeoutConfig": { @@ -5844,6 +5844,12 @@ "smithy.api#documentation": "

              The ARN of the job template used to create the job.

              " } }, + "jobExecutionsRetryConfig": { + "target": "com.amazonaws.iot#JobExecutionsRetryConfig", + "traits": { + "smithy.api#documentation": "

              Allows you to create the criteria to retry a job.

              " + } + }, "documentParameters": { "target": "com.amazonaws.iot#ParameterMap", "traits": { @@ -5965,6 +5971,12 @@ "traits": { "smithy.api#documentation": "

              Metadata that can be used to manage the job template.

              " } + }, + "jobExecutionsRetryConfig": { + "target": "com.amazonaws.iot#JobExecutionsRetryConfig", + "traits": { + "smithy.api#documentation": "

              Allows you to create the criteria to retry a job.

              " + } } } }, @@ -11146,6 +11158,12 @@ }, "timeoutConfig": { "target": "com.amazonaws.iot#TimeoutConfig" + }, + "jobExecutionsRetryConfig": { + "target": "com.amazonaws.iot#JobExecutionsRetryConfig", + "traits": { + "smithy.api#documentation": "

              The configuration that determines how many retries are allowed for each failure type\n for a job.

              " + } } } }, @@ -15882,6 +15900,12 @@ "smithy.api#documentation": "

              The ARN of the job template used to create the job.

              " } }, + "jobExecutionsRetryConfig": { + "target": "com.amazonaws.iot#JobExecutionsRetryConfig", + "traits": { + "smithy.api#documentation": "

              The configuration for the criteria to retry the job.

              " + } + }, "documentParameters": { "target": "com.amazonaws.iot#ParameterMap", "traits": { @@ -16106,6 +16130,12 @@ "traits": { "smithy.api#documentation": "

              A string (consisting of the digits \"0\" through \"9\") which identifies this particular job execution on\n this particular device. It can be used later in commands which return or update job execution\n information.

              " } + }, + "retryAttempt": { + "target": "com.amazonaws.iot#RetryAttempt", + "traits": { + "smithy.api#documentation": "

              The number that indicates how many retry attempts have been completed for this \n job on this device.

              " + } } }, "traits": { @@ -16164,6 +16194,21 @@ "target": "com.amazonaws.iot#JobExecutionSummaryForThing" } }, + "com.amazonaws.iot#JobExecutionsRetryConfig": { + "type": "structure", + "members": { + "criteriaList": { + "target": "com.amazonaws.iot#RetryCriteriaList", + "traits": { + "smithy.api#documentation": "

              The list of criteria that determines how many retries are allowed for each failure\n type for a job.

              ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

              The configuration that determines how many retries are allowed for each failure \n type for a job.

              " + } + }, "com.amazonaws.iot#JobExecutionsRolloutConfig": { "type": "structure", "members": { @@ -18435,6 +18480,13 @@ "smithy.api#documentation": "

              The token to retrieve the next set of results.

              ", "smithy.api#httpQuery": "nextToken" } + }, + "jobId": { + "target": "com.amazonaws.iot#JobId", + "traits": { + "smithy.api#documentation": "

              The unique identifier you assigned to this job when it was created.

              ", + "smithy.api#httpQuery": "jobId" + } } } }, @@ -22174,6 +22226,16 @@ "target": "com.amazonaws.iot#Number" } }, + "com.amazonaws.iot#NumberOfRetries": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 10 + } + } + }, "com.amazonaws.iot#NumberOfThings": { "type": "integer", "traits": { @@ -24276,6 +24338,65 @@ "target": "com.amazonaws.iot#Resource" } }, + "com.amazonaws.iot#RetryAttempt": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.iot#RetryCriteria": { + "type": "structure", + "members": { + "failureType": { + "target": "com.amazonaws.iot#RetryableFailureType", + "traits": { + "smithy.api#documentation": "

              The type of job execution failures that can initiate a job retry.

              ", + "smithy.api#required": {} + } + }, + "numberOfRetries": { + "target": "com.amazonaws.iot#NumberOfRetries", + "traits": { + "smithy.api#documentation": "

              The number of retries allowed for a failure type for the job.

              ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

              The criteria that determines how many retries are allowed for each failure\n type for a job.
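              To show how the new JobExecutionsRetryConfig and RetryCriteria shapes fit together, here is a hedged sketch of creating a job with retries through @aws-sdk/client-iot; the job ID, target ARN, and document URL are placeholders.

```ts
import { IoTClient, CreateJobCommand } from "@aws-sdk/client-iot";

const client = new IoTClient({ region: "us-east-1" });

async function createJobWithRetries(): Promise<void> {
  await client.send(
    new CreateJobCommand({
      jobId: "firmware-update-001",                                       // placeholder
      targets: ["arn:aws:iot:us-east-1:111122223333:thing/MyThing"],      // placeholder target
      documentSource: "https://example-bucket.s3.amazonaws.com/job.json", // placeholder job document
      jobExecutionsRetryConfig: {
        // criteriaList accepts 1-2 entries; numberOfRetries is limited to 0-10.
        criteriaList: [
          { failureType: "FAILED", numberOfRetries: 3 },
          { failureType: "TIMED_OUT", numberOfRetries: 2 },
        ],
      },
    })
  );
}

createJobWithRetries().catch(console.error);
```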

              " + } + }, + "com.amazonaws.iot#RetryCriteriaList": { + "type": "list", + "member": { + "target": "com.amazonaws.iot#RetryCriteria" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, + "com.amazonaws.iot#RetryableFailureType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FAILED", + "name": "FAILED" + }, + { + "value": "TIMED_OUT", + "name": "TIMED_OUT" + }, + { + "value": "ALL", + "name": "ALL" + } + ] + } + }, "com.amazonaws.iot#RoleAlias": { "type": "string", "traits": { @@ -29142,6 +29263,12 @@ "smithy.api#documentation": "

              The namespace used to indicate that a job is a customer-managed job.

              \n

              When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that \n contain the value in the following format.

              \n

              \n $aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/\n

              \n \n

              The namespaceId feature is in public preview.

              \n
              ", "smithy.api#httpQuery": "namespaceId" } + }, + "jobExecutionsRetryConfig": { + "target": "com.amazonaws.iot#JobExecutionsRetryConfig", + "traits": { + "smithy.api#documentation": "

              Allows you to create the criteria to retry a job.

              " + } } } }, diff --git a/codegen/sdk-codegen/aws-models/lakeformation.json b/codegen/sdk-codegen/aws-models/lakeformation.json index ac79531802cf..cc0feedaa402 100644 --- a/codegen/sdk-codegen/aws-models/lakeformation.json +++ b/codegen/sdk-codegen/aws-models/lakeformation.json @@ -111,6 +111,12 @@ { "target": "com.amazonaws.lakeformation#GetTableObjects" }, + { + "target": "com.amazonaws.lakeformation#GetTemporaryGluePartitionCredentials" + }, + { + "target": "com.amazonaws.lakeformation#GetTemporaryGlueTableCredentials" + }, { "target": "com.amazonaws.lakeformation#GetWorkUnitResults" }, @@ -192,6 +198,9 @@ "smithy.api#httpError": 403 } }, + "com.amazonaws.lakeformation#AccessKeyIdString": { + "type": "string" + }, "com.amazonaws.lakeformation#AddLFTagsToResource": { "type": "operation", "input": { @@ -322,6 +331,36 @@ "smithy.api#error": "client" } }, + "com.amazonaws.lakeformation#AuditContext": { + "type": "structure", + "members": { + "AdditionalAuditContext": { + "target": "com.amazonaws.lakeformation#AuditContextString", + "traits": { + "smithy.api#documentation": "

              The filter engine can populate the 'AdditionalAuditContext' information with the request ID for you to track. This information will be displayed in the CloudTrail log in your account.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              A structure used to include auditing information on the privileged API.

              " + } + }, + "com.amazonaws.lakeformation#AuditContextString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" + } + }, + "com.amazonaws.lakeformation#AuthorizedSessionTagValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#NameString" + } + }, "com.amazonaws.lakeformation#BatchGrantPermissions": { "type": "operation", "input": { @@ -874,6 +913,16 @@ "type": "structure", "members": {} }, + "com.amazonaws.lakeformation#CredentialTimeoutDurationSecondInteger": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 900, + "max": 43200 + } + } + }, "com.amazonaws.lakeformation#DataCellsFilter": { "type": "structure", "members": { @@ -920,7 +969,7 @@ "ColumnWildcard": { "target": "com.amazonaws.lakeformation#ColumnWildcard", "traits": { - "smithy.api#documentation": "

              A wildcard with exclusions.

              " + "smithy.api#documentation": "

              A wildcard with exclusions.

              \n \n

              You must specify either a ColumnNames list or the\n ColumnWildCard.

              " } } }, @@ -977,7 +1026,7 @@ } }, "traits": { - "smithy.api#documentation": "

              The AWS Lake Formation principal. Supported principals are IAM users or IAM roles.

              " + "smithy.api#documentation": "

              The Lake Formation principal. Supported principals are IAM users\n or IAM roles.

              " } }, "com.amazonaws.lakeformation#DataLakePrincipalList": { @@ -1066,6 +1115,24 @@ "traits": { "smithy.api#documentation": "

              A list of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). The user ARNs can be logged in the resource owner's CloudTrail log.

              \n\t\n\t

              You may want to specify this property when you are in a high-trust boundary, such as the same team or company.

              " } + }, + "AllowExternalDataFiltering": { + "target": "com.amazonaws.lakeformation#NullableBoolean", + "traits": { + "smithy.api#documentation": "

              Whether to allow Amazon EMR clusters to access data managed by Lake Formation.

              \n\t\n

              If true, you allow Amazon EMR clusters to access data in Amazon S3 locations that are registered with Lake Formation.

              \n\t\n

              If false or null, no Amazon EMR clusters will be able to access data in Amazon S3 locations that are registered with Lake Formation.

              \n \n

              For more information, see (Optional) Allow Data Filtering on Amazon EMR.

              " + } + }, + "ExternalDataFilteringAllowList": { + "target": "com.amazonaws.lakeformation#DataLakePrincipalList", + "traits": { + "smithy.api#documentation": "

              A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering.

              " + } + }, + "AuthorizedSessionTagValueList": { + "target": "com.amazonaws.lakeformation#AuthorizedSessionTagValueList", + "traits": { + "smithy.api#documentation": "

              Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. Lake Formation will publish the acceptable key-value pair, for example key = \"LakeFormationTrustedCaller\" and value = \"TRUE\" and the third party integrator must properly tag the temporary security credentials that will be used to call Lake Formation's administrative APIs.
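              The three new DataLakeSettings members above would typically be written back with PutDataLakeSettings. A hedged sketch using @aws-sdk/client-lakeformation follows; the account ID and session tag value are placeholders, and reading the current settings first is just one way to avoid clobbering unrelated fields.

```ts
import {
  LakeFormationClient,
  GetDataLakeSettingsCommand,
  PutDataLakeSettingsCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" });

async function enableExternalDataFiltering(): Promise<void> {
  // Start from the current settings so only the filtering-related fields change.
  const { DataLakeSettings } = await client.send(new GetDataLakeSettingsCommand({}));

  await client.send(
    new PutDataLakeSettingsCommand({
      DataLakeSettings: {
        ...(DataLakeSettings ?? {}),
        AllowExternalDataFiltering: true,
        // Placeholder account that runs the Amazon EMR clusters allowed to filter data.
        ExternalDataFilteringAllowList: [{ DataLakePrincipalIdentifier: "111122223333" }],
        // Placeholder session tag value that the trusted integration attaches when assuming the role.
        AuthorizedSessionTagValueList: ["TRUE"],
      },
    })
  );
}

enableExternalDataFiltering().catch(console.error);
```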

              " + } } }, "traits": { @@ -1615,6 +1682,9 @@ "smithy.api#documentation": "

              Statistics related to the processing of a query statement.

              " } }, + "com.amazonaws.lakeformation#ExpirationTimestamp": { + "type": "timestamp" + }, "com.amazonaws.lakeformation#ExpiredException": { "type": "structure", "members": { @@ -2321,6 +2391,219 @@ } } }, + "com.amazonaws.lakeformation#GetTemporaryGluePartitionCredentials": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#GetTemporaryGluePartitionCredentialsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#GetTemporaryGluePartitionCredentialsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#PermissionTypeMismatchException" + } + ], + "traits": { + "smithy.api#documentation": "

              This API is identical to GetTemporaryGlueTableCredentials except that this is used when the target Data Catalog resource is of type Partition. Lake Formation restricts the permission of the vended credentials with the same scope-down policy that restricts access to a single Amazon S3 prefix.
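              A hedged sketch of requesting partition-scoped credentials as described above, via @aws-sdk/client-lakeformation; the table ARN, partition values, and audit context are placeholders.

```ts
import {
  LakeFormationClient,
  GetTemporaryGluePartitionCredentialsCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" });

async function getPartitionCredentials() {
  const response = await client.send(
    new GetTemporaryGluePartitionCredentialsCommand({
      TableArn: "arn:aws:glue:us-east-1:111122223333:table/sales_db/orders", // placeholder
      Partition: { Values: ["2021", "11"] },                                 // placeholder partition key values
      SupportedPermissionTypes: ["COLUMN_PERMISSION"],
      DurationSeconds: 900,
      AuditContext: { AdditionalAuditContext: "query-id-1234" },             // placeholder
    })
  );
  // Temporary credentials scoped down to the single Amazon S3 prefix backing this partition.
  const { AccessKeyId, SecretAccessKey, SessionToken, Expiration } = response;
  return { AccessKeyId, SecretAccessKey, SessionToken, Expiration };
}
```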

              ", + "smithy.api#http": { + "method": "POST", + "uri": "/GetTemporaryGluePartitionCredentials", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#GetTemporaryGluePartitionCredentialsRequest": { + "type": "structure", + "members": { + "TableArn": { + "target": "com.amazonaws.lakeformation#ResourceArnString", + "traits": { + "smithy.api#documentation": "

              The ARN of the table that contains the partitions.

              ", + "smithy.api#required": {} + } + }, + "Partition": { + "target": "com.amazonaws.lakeformation#PartitionValueList", + "traits": { + "smithy.api#documentation": "

              A list of partition values identifying a single partition.

              ", + "smithy.api#required": {} + } + }, + "Permissions": { + "target": "com.amazonaws.lakeformation#PermissionList", + "traits": { + "smithy.api#documentation": "

              Filters the request based on the user having been granted a list of specified permissions on the requested resource(s).

              " + } + }, + "DurationSeconds": { + "target": "com.amazonaws.lakeformation#CredentialTimeoutDurationSecondInteger", + "traits": { + "smithy.api#documentation": "

              The time period, between 900 and 21,600 seconds, for the timeout of the temporary credentials.

              " + } + }, + "AuditContext": { + "target": "com.amazonaws.lakeformation#AuditContext", + "traits": { + "smithy.api#documentation": "

              A structure representing context to access a resource (column names, query ID, etc).

              " + } + }, + "SupportedPermissionTypes": { + "target": "com.amazonaws.lakeformation#PermissionTypeList", + "traits": { + "smithy.api#documentation": "

              A list of supported permission types for the partition. Valid values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#GetTemporaryGluePartitionCredentialsResponse": { + "type": "structure", + "members": { + "AccessKeyId": { + "target": "com.amazonaws.lakeformation#AccessKeyIdString", + "traits": { + "smithy.api#documentation": "

              The access key ID for the temporary credentials.

              " + } + }, + "SecretAccessKey": { + "target": "com.amazonaws.lakeformation#SecretAccessKeyString", + "traits": { + "smithy.api#documentation": "

              The secret key for the temporary credentials.

              " + } + }, + "SessionToken": { + "target": "com.amazonaws.lakeformation#SessionTokenString", + "traits": { + "smithy.api#documentation": "

              The session token for the temporary credentials.

              " + } + }, + "Expiration": { + "target": "com.amazonaws.lakeformation#ExpirationTimestamp", + "traits": { + "smithy.api#documentation": "

              The date and time when the temporary credentials expire.

              " + } + } + } + }, + "com.amazonaws.lakeformation#GetTemporaryGlueTableCredentials": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#GetTemporaryGlueTableCredentialsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#GetTemporaryGlueTableCredentialsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#PermissionTypeMismatchException" + } + ], + "traits": { + "smithy.api#documentation": "

              Allows a caller in a secure environment to assume a role with permission to access Amazon S3. In order to vend such credentials, Lake Formation assumes the role associated with a registered location, for example an Amazon S3 bucket, with a scope-down policy that restricts access to a single prefix.
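              A sketch of the table-level variant, showing one plausible way to feed the vended credentials into an S3 client; the ARN, bucket, and key are placeholders, and the credential field mapping assumes the standard SDK v3 credentials shape.

```ts
import {
  LakeFormationClient,
  GetTemporaryGlueTableCredentialsCommand,
} from "@aws-sdk/client-lakeformation";
import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

const lakeFormation = new LakeFormationClient({ region: "us-east-1" });

async function readRegisteredTableData(): Promise<void> {
  const creds = await lakeFormation.send(
    new GetTemporaryGlueTableCredentialsCommand({
      TableArn: "arn:aws:glue:us-east-1:111122223333:table/sales_db/orders", // placeholder
      SupportedPermissionTypes: ["COLUMN_PERMISSION"],
      DurationSeconds: 3600,
    })
  );

  // The scope-down credentials only grant access to the table's registered S3 location.
  const s3 = new S3Client({
    region: "us-east-1",
    credentials: {
      accessKeyId: creds.AccessKeyId!,
      secretAccessKey: creds.SecretAccessKey!,
      sessionToken: creds.SessionToken,
      expiration: creds.Expiration,
    },
  });

  // Placeholder bucket and key under the table's registered location.
  await s3.send(new GetObjectCommand({ Bucket: "example-data-lake", Key: "sales_db/orders/part-00000.parquet" }));
}
```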

              ", + "smithy.api#http": { + "method": "POST", + "uri": "/GetTemporaryGlueTableCredentials", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#GetTemporaryGlueTableCredentialsRequest": { + "type": "structure", + "members": { + "TableArn": { + "target": "com.amazonaws.lakeformation#ResourceArnString", + "traits": { + "smithy.api#documentation": "

              The ARN identifying a table in the Data Catalog for the temporary credentials request.

              ", + "smithy.api#required": {} + } + }, + "Permissions": { + "target": "com.amazonaws.lakeformation#PermissionList", + "traits": { + "smithy.api#documentation": "

              Filters the request based on the user having been granted a list of specified permissions on the requested resource(s).

              " + } + }, + "DurationSeconds": { + "target": "com.amazonaws.lakeformation#CredentialTimeoutDurationSecondInteger", + "traits": { + "smithy.api#documentation": "

              The time period, between 900 and 21,600 seconds, for the timeout of the temporary credentials.

              " + } + }, + "AuditContext": { + "target": "com.amazonaws.lakeformation#AuditContext", + "traits": { + "smithy.api#documentation": "

              A structure representing context to access a resource (column names, query ID, etc).

              " + } + }, + "SupportedPermissionTypes": { + "target": "com.amazonaws.lakeformation#PermissionTypeList", + "traits": { + "smithy.api#documentation": "

              A list of supported permission types for the table. Valid values are COLUMN_PERMISSION and CELL_FILTER_PERMISSION.

              ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#GetTemporaryGlueTableCredentialsResponse": { + "type": "structure", + "members": { + "AccessKeyId": { + "target": "com.amazonaws.lakeformation#AccessKeyIdString", + "traits": { + "smithy.api#documentation": "

              The access key ID for the temporary credentials.

              " + } + }, + "SecretAccessKey": { + "target": "com.amazonaws.lakeformation#SecretAccessKeyString", + "traits": { + "smithy.api#documentation": "

              The secret key for the temporary credentials.

              " + } + }, + "SessionToken": { + "target": "com.amazonaws.lakeformation#SessionTokenString", + "traits": { + "smithy.api#documentation": "

              The session token for the temporary credentials.

              " + } + }, + "Expiration": { + "target": "com.amazonaws.lakeformation#ExpirationTimestamp", + "traits": { + "smithy.api#documentation": "

              The date and time when the temporary credentials expire.

              " + } + } + } + }, "com.amazonaws.lakeformation#GetWorkUnitResults": { "type": "operation", "input": { @@ -3421,6 +3704,21 @@ "smithy.api#documentation": "

              A structure containing a list of partition values and table objects.

              " } }, + "com.amazonaws.lakeformation#PartitionValueList": { + "type": "structure", + "members": { + "Values": { + "target": "com.amazonaws.lakeformation#ValueStringList", + "traits": { + "smithy.api#documentation": "

              The list of partition values.

              ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

              Contains a list of values defining partitions.

              " + } + }, "com.amazonaws.lakeformation#PartitionValueString": { "type": "string", "traits": { @@ -3521,6 +3819,48 @@ "target": "com.amazonaws.lakeformation#Permission" } }, + "com.amazonaws.lakeformation#PermissionType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "COLUMN_PERMISSION", + "name": "COLUMN_PERMISSION" + }, + { + "value": "CELL_FILTER_PERMISSION", + "name": "CELL_FILTER_PERMISSION" + } + ] + } + }, + "com.amazonaws.lakeformation#PermissionTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#PermissionType" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.lakeformation#PermissionTypeMismatchException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

              A message describing the problem.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              The engine does not support filtering data based on the enforced permissions. For example, if you call the GetTemporaryGlueTableCredentials operation with SupportedPermissionType equal to ColumnPermission, but cell-level permissions exist on the table, this exception is thrown.

              ", + "smithy.api#error": "client" + } + }, "com.amazonaws.lakeformation#PlanningStatistics": { "type": "structure", "members": { @@ -4351,6 +4691,12 @@ } } }, + "com.amazonaws.lakeformation#SecretAccessKeyString": { + "type": "string" + }, + "com.amazonaws.lakeformation#SessionTokenString": { + "type": "string" + }, "com.amazonaws.lakeformation#StartQueryPlanning": { "type": "operation", "input": { @@ -5268,6 +5614,20 @@ } } }, + "com.amazonaws.lakeformation#ValueString": { + "type": "string" + }, + "com.amazonaws.lakeformation#ValueStringList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#ValueString" + }, + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, "com.amazonaws.lakeformation#VirtualObject": { "type": "structure", "members": { diff --git a/codegen/sdk-codegen/aws-models/mediaconvert.json b/codegen/sdk-codegen/aws-models/mediaconvert.json index 3d062db2083e..ecaeada7d004 100644 --- a/codegen/sdk-codegen/aws-models/mediaconvert.json +++ b/codegen/sdk-codegen/aws-models/mediaconvert.json @@ -1223,7 +1223,7 @@ } }, "ExternalAudioFileInput": { - "target": "com.amazonaws.mediaconvert#__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEAATTMMOOSSHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEAATTMMOOSS", + "target": "com.amazonaws.mediaconvert#__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEAATTMMOOSSOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEAATTMMOOSSOOGGGGaA", "traits": { "smithy.api#documentation": "Specifies audio data from an external file source.", "smithy.api#jsonName": "externalAudioFileInput" @@ -15023,7 +15023,7 @@ "com.amazonaws.mediaconvert#NoiseFilterPostTemporalSharpening": { "type": "string", "traits": { - "smithy.api#documentation": "Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), you can use this setting to apply sharpening. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply filtering, depending on input type and quality. When you set Noise reducer to Temporal, your output bandwidth is reduced. When Post temporal sharpening is also enabled, that bandwidth reduction is smaller.", + "smithy.api#documentation": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the sharpness of your output is reduced. You can optionally use Post temporal sharpening (PostTemporalSharpening) to apply sharpening to the edges of your output. 
The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (PostTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening.", "smithy.api#enum": [ { "value": "DISABLED", @@ -15040,6 +15040,26 @@ ] } }, + "com.amazonaws.mediaconvert#NoiseFilterPostTemporalSharpeningStrength": { + "type": "string", + "traits": { + "smithy.api#documentation": "Use Post temporal sharpening strength (PostTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), or leave blank, to apply a low amount of sharpening. Set Post temporal sharpening strength to Medium (MEDIUM) to apply medium amount of sharpening. Set Post temporal sharpening strength to High (HIGH) to apply a high amount of sharpening.", + "smithy.api#enum": [ + { + "value": "LOW", + "name": "LOW" + }, + { + "value": "MEDIUM", + "name": "MEDIUM" + }, + { + "value": "HIGH", + "name": "HIGH" + } + ] + } + }, "com.amazonaws.mediaconvert#NoiseReducer": { "type": "structure", "members": { @@ -15173,10 +15193,17 @@ "PostTemporalSharpening": { "target": "com.amazonaws.mediaconvert#NoiseFilterPostTemporalSharpening", "traits": { - "smithy.api#documentation": "Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), you can use this setting to apply sharpening. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply filtering, depending on input type and quality. When you set Noise reducer to Temporal, your output bandwidth is reduced. When Post temporal sharpening is also enabled, that bandwidth reduction is smaller.", + "smithy.api#documentation": "When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), the sharpness of your output is reduced. You can optionally use Post temporal sharpening (PostTemporalSharpening) to apply sharpening to the edges of your output. The default behavior, Auto (AUTO), allows the transcoder to determine whether to apply sharpening, depending on your input type and quality. When you set Post temporal sharpening to Enabled (ENABLED), specify how much sharpening is applied using Post temporal sharpening strength (PostTemporalSharpeningStrength). Set Post temporal sharpening to Disabled (DISABLED) to not apply sharpening.", "smithy.api#jsonName": "postTemporalSharpening" } }, + "PostTemporalSharpeningStrength": { + "target": "com.amazonaws.mediaconvert#NoiseFilterPostTemporalSharpeningStrength", + "traits": { + "smithy.api#documentation": "Use Post temporal sharpening strength (PostTemporalSharpeningStrength) to define the amount of sharpening the transcoder applies to your output. Set Post temporal sharpening strength to Low (LOW), or leave blank, to apply a low amount of sharpening. Set Post temporal sharpening strength to Medium (MEDIUM) to apply medium amount of sharpening. 
Set Post temporal sharpening strength to High (HIGH) to apply a high amount of sharpening.", + "smithy.api#jsonName": "postTemporalSharpeningStrength" + } + }, "Speed": { "target": "com.amazonaws.mediaconvert#__integerMinNegative1Max3", "traits": { @@ -20849,10 +20876,10 @@ "smithy.api#pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA]|[aA][tT][mM][oO][sS]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[wW][eE][bB][mM]|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[xX][mM][lL]|[oO][gG][gGaA]|[aA][tT][mM][oO][sS])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" } }, - "com.amazonaws.mediaconvert#__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEAATTMMOOSSHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEAATTMMOOSS": { + "com.amazonaws.mediaconvert#__stringPatternS3MM2PPWWEEBBMMMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEAATTMMOOSSOOGGGGaAHttpsMM2VVMMPPEEGGMMPP3AAVVIIMMPP4FFLLVVMMPPTTMMPPGGMM4VVTTRRPPFF4VVMM2TTSSTTSS264HH264MMKKVVMMKKAAMMOOVVMMTTSSMM2TTWWMMVVaAAASSFFVVOOBB3GGPP3GGPPPPMMXXFFDDIIVVXXXXVVIIDDRRAAWWDDVVGGXXFFMM1VV3GG2VVMMFFMM3UU8LLCCHHGGXXFFMMPPEEGG2MMXXFFMMPPEEGG2MMXXFFHHDDWWAAVVYY4MMAAAACCAAIIFFFFMMPP2AACC3EECC3DDTTSSEEAATTMMOOSSOOGGGGaA": { "type": "string", "traits": { - "smithy.api#pattern": 
"^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[wW][eE][bB][mM]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]|[aA][tT][mM][oO][sS]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]|[aA][tT][mM][oO][sS])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" + "smithy.api#pattern": "^((s3://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[pP]|[wW][eE][bB][mM]|[mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]|[aA][tT][mM][oO][sS]|[oO][gG][gGaA]))))|(https?://([^\\/]+\\/+)+([^\\/\\.]+|(([^\\/]*)\\.([mM]2[vV]|[mM][pP][eE][gG]|[mM][pP]3|[aA][vV][iI]|[mM][pP]4|[fF][lL][vV]|[mM][pP][tT]|[mM][pP][gG]|[mM]4[vV]|[tT][rR][pP]|[fF]4[vV]|[mM]2[tT][sS]|[tT][sS]|264|[hH]264|[mM][kK][vV]|[mM][kK][aA]|[mM][oO][vV]|[mM][tT][sS]|[mM]2[tT]|[wW][mM][vVaA]|[aA][sS][fF]|[vV][oO][bB]|3[gG][pP]|3[gG][pP][pP]|[mM][xX][fF]|[dD][iI][vV][xX]|[xX][vV][iI][dD]|[rR][aA][wW]|[dD][vV]|[gG][xX][fF]|[mM]1[vV]|3[gG]2|[vV][mM][fF]|[mM]3[uU]8|[lL][cC][hH]|[gG][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF]_[mM][pP][eE][gG]2|[mM][xX][fF][hH][dD]|[wW][aA][vV]|[yY]4[mM]|[aA][aA][cC]|[aA][iI][fF][fF]|[mM][pP]2|[aA][cC]3|[eE][cC]3|[dD][tT][sS][eE]|[aA][tT][mM][oO][sS]|[oO][gG][gGaA])))(\\?([^&=]+=[^&]+&)*[^&=]+=[^&]+)?))$" } }, "com.amazonaws.mediaconvert#__stringPatternSNManifestConfirmConditionNotificationNS": { diff --git a/codegen/sdk-codegen/aws-models/medialive.json b/codegen/sdk-codegen/aws-models/medialive.json index 2f994cad212a..63ed9b72ba30 100644 --- a/codegen/sdk-codegen/aws-models/medialive.json +++ b/codegen/sdk-codegen/aws-models/medialive.json @@ -9862,10 +9862,17 @@ "ProgramDateTime": { "target": "com.amazonaws.medialive#HlsProgramDateTime", "traits": { - "smithy.api#documentation": "Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. 
The value is calculated as follows: either the program date and time are initialized using the input timecode source, or the time is initialized using the input timecode source and the date is initialized using the timestampOffset.", + "smithy.api#documentation": "Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. The value is calculated using the program date time clock.", "smithy.api#jsonName": "programDateTime" } }, + "ProgramDateTimeClock": { + "target": "com.amazonaws.medialive#HlsProgramDateTimeClock", + "traits": { + "smithy.api#documentation": "Specifies the algorithm used to drive the HLS EXT-X-PROGRAM-DATE-TIME clock. Options include:\n\nINITIALIZE_FROM_OUTPUT_TIMECODE: The PDT clock is initialized as a function of the first output timecode, then incremented by the EXTINF duration of each encoded segment.\n\nSYSTEM_CLOCK: The PDT clock is initialized as a function of the UTC wall clock, then incremented by the EXTINF duration of each encoded segment. If the PDT clock diverges from the wall clock by more than 500ms, it is resynchronized to the wall clock.", + "smithy.api#jsonName": "programDateTimeClock" + } + }, "ProgramDateTimePeriod": { "target": "com.amazonaws.medialive#__integerMin0Max3600", "traits": { @@ -10256,6 +10263,22 @@ ] } }, + "com.amazonaws.medialive#HlsProgramDateTimeClock": { + "type": "string", + "traits": { + "smithy.api#documentation": "Hls Program Date Time Clock", + "smithy.api#enum": [ + { + "value": "INITIALIZE_FROM_OUTPUT_TIMECODE", + "name": "INITIALIZE_FROM_OUTPUT_TIMECODE" + }, + { + "value": "SYSTEM_CLOCK", + "name": "SYSTEM_CLOCK" + } + ] + } + }, "com.amazonaws.medialive#HlsRedundantManifest": { "type": "string", "traits": { diff --git a/codegen/sdk-codegen/aws-models/mediatailor.json b/codegen/sdk-codegen/aws-models/mediatailor.json index 1e50890880e3..0d3a810f7d41 100644 --- a/codegen/sdk-codegen/aws-models/mediatailor.json +++ b/codegen/sdk-codegen/aws-models/mediatailor.json @@ -281,7 +281,7 @@ "FillerSlate": { "target": "com.amazonaws.mediatailor#SlateSource", "traits": { - "smithy.api#documentation": "

              Contains information about the slate used to fill gaps between programs in the schedule. You must configure FillerSlate if your channel uses an LINEAR PlaybackMode.

              " + "smithy.api#documentation": "

              The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the LINEAR PlaybackMode. MediaTailor doesn't support filler slate for channels using the LOOP PlaybackMode.

              " } }, "LastModifiedTime": { @@ -454,7 +454,7 @@ "FillerSlate": { "target": "com.amazonaws.mediatailor#SlateSource", "traits": { - "smithy.api#documentation": "

              The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses a LINEAR PlaybackMode.

              " + "smithy.api#documentation": "

              The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the LINEAR PlaybackMode. MediaTailor doesn't support filler slate for channels using the LOOP PlaybackMode.

              " } }, "Outputs": { @@ -2586,6 +2586,21 @@ }, "com.amazonaws.mediatailor#MediaTailor": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "MediaTailor", + "arnNamespace": "mediatailor", + "cloudFormationName": "MediaTailor", + "cloudTrailEventSource": "mediatailor.amazonaws.com", + "endpointPrefix": "api.mediatailor" + }, + "aws.auth#sigv4": { + "name": "mediatailor" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

              Use the AWS Elemental MediaTailor SDKs and CLI to configure scalable ad insertion and linear channels. With MediaTailor, you can assemble existing content into a linear stream and serve targeted ads to viewers while maintaining broadcast quality in over-the-top (OTT) video applications. For information about using the service, including detailed information about the settings covered in this guide, see the AWS Elemental MediaTailor User Guide.

              Through the SDKs and the CLI you manage AWS Elemental MediaTailor configurations and channels the same as you do through the console. For example, you specify ad insertion behavior and mapping information for the origin server and the ad decision server (ADS).

              ", + "smithy.api#title": "AWS MediaTailor" + }, "version": "2018-04-23", "operations": [ { @@ -2699,22 +2714,7 @@ { "target": "com.amazonaws.mediatailor#UpdateVodSource" } - ], - "traits": { - "aws.api#service": { - "sdkId": "MediaTailor", - "arnNamespace": "mediatailor", - "cloudFormationName": "MediaTailor", - "cloudTrailEventSource": "mediatailor.amazonaws.com", - "endpointPrefix": "api.mediatailor" - }, - "aws.auth#sigv4": { - "name": "mediatailor" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

              Use the AWS Elemental MediaTailor SDKs and CLI to configure scalable ad insertion and linear channels. With MediaTailor, you can assemble existing content into a linear stream and serve targeted ads to viewers while maintaining broadcast quality in over-the-top (OTT) video applications. For information about using the service, including detailed information about the settings covered in this guide, see the AWS Elemental MediaTailor User Guide.

              Through the SDKs and the CLI you manage AWS Elemental MediaTailor configurations and channels the same as you do through the console. For example, you specify ad insertion behavior and mapping information for the origin server and the ad decision server (ADS).

              ", - "smithy.api#title": "AWS MediaTailor" - } + ] }, "com.amazonaws.mediatailor#MessageType": { "type": "string", @@ -3874,6 +3874,12 @@ "smithy.api#required": {} } }, + "FillerSlate": { + "target": "com.amazonaws.mediatailor#SlateSource", + "traits": { + "smithy.api#documentation": "

              The slate used to fill gaps between programs in the schedule. You must configure filler slate if your channel uses the LINEAR PlaybackMode. MediaTailor doesn't support filler slate for channels using the LOOP PlaybackMode.

              " + } + }, "Outputs": { "target": "com.amazonaws.mediatailor#RequestOutputs", "traits": { diff --git a/codegen/sdk-codegen/aws-models/mwaa.json b/codegen/sdk-codegen/aws-models/mwaa.json index 8d3e01ba9c6e..62a5e923d3c6 100644 --- a/codegen/sdk-codegen/aws-models/mwaa.json +++ b/codegen/sdk-codegen/aws-models/mwaa.json @@ -9,7 +9,7 @@ } }, "traits": { - "smithy.api#documentation": "

              Access to the Airflow Web UI or CLI has been Denied. Please follow the MWAA user guide to setup permissions to access the Web UI and CLI functionality.

              ", + "smithy.api#documentation": "

              Access to the Apache Airflow Web UI or CLI has been denied due to insufficient permissions. To learn more, see Accessing an Amazon MWAA environment.

              ", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -35,6 +35,21 @@ }, "com.amazonaws.mwaa#AmazonMWAA": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "MWAA", + "arnNamespace": "airflow", + "cloudFormationName": "AmazonMWAA", + "cloudTrailEventSource": "airflow.amazonaws.com", + "endpointPrefix": "airflow" + }, + "aws.auth#sigv4": { + "name": "airflow" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "Amazon Managed Workflows for Apache Airflow\n

              This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.

              \n\n

              \n Endpoints\n

              \n \n\n

              \n Regions\n

              \n

              For a list of regions that Amazon MWAA supports, see Region availability in the Amazon MWAA User Guide.

              ", + "smithy.api#title": "AmazonMWAA" + }, "version": "2020-07-01", "operations": [ { @@ -70,22 +85,7 @@ { "target": "com.amazonaws.mwaa#UpdateEnvironment" } - ], - "traits": { - "aws.api#service": { - "sdkId": "MWAA", - "arnNamespace": "airflow", - "cloudFormationName": "AmazonMWAA", - "cloudTrailEventSource": "airflow.amazonaws.com", - "endpointPrefix": "airflow" - }, - "aws.auth#sigv4": { - "name": "airflow" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon Managed Workflows for Apache Airflow\n

              This section contains the Amazon Managed Workflows for Apache Airflow (MWAA) API reference documentation. For more information, see What Is Amazon MWAA?.

              ", - "smithy.api#title": "AmazonMWAA" - } + ] }, "com.amazonaws.mwaa#CloudWatchLogGroupArn": { "type": "string", @@ -104,7 +104,7 @@ "min": 1, "max": 64 }, - "smithy.api#pattern": "^[a-z]+([a-z._]*[a-z]+)?$" + "smithy.api#pattern": "^[a-z]+([a-z0-9._]*[a-z0-9_]+)?$" } }, "com.amazonaws.mwaa#ConfigValue": { @@ -112,9 +112,9 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 256 + "max": 65536 }, - "smithy.api#pattern": ".*" + "smithy.api#pattern": "^[ -~]+$" } }, "com.amazonaws.mwaa#CreateCliToken": { @@ -131,7 +131,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Create a CLI token to use Airflow CLI.

              ", + "smithy.api#documentation": "

              Creates a CLI token for the Airflow CLI. To learn more, see Creating an Apache Airflow CLI token.
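              A minimal sketch of fetching a CLI token with @aws-sdk/client-mwaa; the environment name is a placeholder, and posting the token to the web server's /aws_mwaa/cli endpoint follows the pattern described in the MWAA user guide rather than anything defined in this model (global fetch assumes Node.js 18+).

```ts
import { MWAAClient, CreateCliTokenCommand } from "@aws-sdk/client-mwaa";

const client = new MWAAClient({ region: "us-east-1" });

async function runAirflowCliCommand(cliCommand: string): Promise<void> {
  const { CliToken, WebServerHostname } = await client.send(
    new CreateCliTokenCommand({ Name: "MyMWAAEnvironment" }) // placeholder environment name
  );

  // The short-lived token is sent as a bearer token to the environment's CLI endpoint.
  const response = await fetch(`https://${WebServerHostname}/aws_mwaa/cli`, {
    method: "POST",
    headers: { Authorization: `Bearer ${CliToken}`, "Content-Type": "text/plain" },
    body: cliCommand, // for example "dags list"
  });
  console.log(await response.text());
}

runAirflowCliCommand("dags list").catch(console.error);
```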

              ", "smithy.api#endpoint": { "hostPrefix": "env." }, @@ -148,7 +148,7 @@ "Name": { "target": "com.amazonaws.mwaa#EnvironmentName", "traits": { - "smithy.api#documentation": "

              Create a CLI token request for a MWAA environment.

              ", + "smithy.api#documentation": "

              The name of the Amazon MWAA environment. For example, MyMWAAEnvironment.

              ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -161,14 +161,14 @@ "CliToken": { "target": "com.amazonaws.mwaa#Token", "traits": { - "smithy.api#documentation": "

              Create an Airflow CLI login token response for the provided JWT token.

              ", + "smithy.api#documentation": "

              An Airflow CLI login token.

              ", "smithy.api#sensitive": {} } }, "WebServerHostname": { "target": "com.amazonaws.mwaa#Hostname", "traits": { - "smithy.api#documentation": "

              Create an Airflow CLI login token response for the provided webserver hostname.

              " + "smithy.api#documentation": "

              The Airflow web server hostname for the environment.

              " } } } @@ -216,7 +216,7 @@ "ExecutionRoleArn": { "target": "com.amazonaws.mwaa#IamRoleArn", "traits": { - "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an AWS Identity and Access Management (IAM) role that grants MWAA permission to access AWS services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              ", + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the execution role for your environment. An execution role is an Amazon Web Services Identity and Access Management (IAM) role that grants MWAA permission to access Amazon Web Services services and resources used by your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              ", "smithy.api#required": {} } }, @@ -237,7 +237,7 @@ "NetworkConfiguration": { "target": "com.amazonaws.mwaa#NetworkConfiguration", "traits": { - "smithy.api#documentation": "

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              ", + "smithy.api#documentation": "

              The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              ", "smithy.api#required": {} } }, @@ -287,31 +287,31 @@ "KmsKey": { "target": "com.amazonaws.mwaa#KmsKey", "traits": { - "smithy.api#documentation": "

              The AWS Key Management Service (KMS) key to encrypt the data in your environment. You can use an AWS owned CMK, or a Customer managed CMK (advanced). To learn more, see Get started with Amazon Managed Workflows for Apache Airflow.

              " + "smithy.api#documentation": "

              The Amazon Web Services Key Management Service (KMS) key to encrypt the data in your environment. You can use an Amazon Web Services owned CMK, or a Customer managed CMK (advanced). To learn more, see Create an Amazon MWAA environment.

              " } }, "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

              The Apache Airflow version for your environment. For example, v1.10.12. If no value is specified, defaults to the latest version. Valid values: v1.10.12.

              " + "smithy.api#documentation": "

              The Apache Airflow version for your environment. If no value is specified, defaults to the latest version. Valid values: 1.10.12, 2.0.2. To learn more, see Apache Airflow versions on Amazon Managed Workflows for Apache Airflow (MWAA).

              " } }, "LoggingConfiguration": { "target": "com.amazonaws.mwaa#LoggingConfigurationInput", "traits": { - "smithy.api#documentation": "

              Defines the Apache Airflow logs to send to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              " + "smithy.api#documentation": "

              Defines the Apache Airflow logs to send to CloudWatch Logs.

              " } }, "WeeklyMaintenanceWindowStart": { "target": "com.amazonaws.mwaa#WeeklyMaintenanceWindowStart", "traits": { - "smithy.api#documentation": "

              The day and time of the week to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only. Supported input includes the following:

              • MON|TUE|WED|THU|FRI|SAT|SUN:([01]\\\\d|2[0-3]):(00|30)
              " + "smithy.api#documentation": "

              The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

              " } }, "Tags": { "target": "com.amazonaws.mwaa#TagMap", "traits": { - "smithy.api#documentation": "

              The key-value tag pairs you want to associate to your environment. For example, \"Environment\": \"Staging\". To learn more, see Tagging AWS resources.

              " + "smithy.api#documentation": "

              The key-value tag pairs you want to associate to your environment. For example, \"Environment\": \"Staging\". To learn more, see Tagging Amazon Web Services resources.

              " } }, "WebserverAccessMode": { @@ -329,7 +329,7 @@ "Schedulers": { "target": "com.amazonaws.mwaa#Schedulers", "traits": { - "smithy.api#documentation": "

              The number of Apache Airflow schedulers to run in your environment.

              " + "smithy.api#documentation": "

              The number of Apache Airflow schedulers to run in your environment. Valid values:

              • v2.0.2 - Accepts between 2 to 5. Defaults to 2.
              • v1.10.12 - Accepts 1.
              " } } }, @@ -371,7 +371,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Create a JWT token to be used to login to Airflow Web UI with claims based Authentication.

              ", + "smithy.api#documentation": "

              Creates a web login token for the Airflow Web UI. To learn more, see Creating an Apache Airflow web login token.

              ", "smithy.api#endpoint": { "hostPrefix": "env." }, @@ -389,7 +389,7 @@ "Name": { "target": "com.amazonaws.mwaa#EnvironmentName", "traits": { - "smithy.api#documentation": "

              Create an Airflow Web UI login token request for a MWAA environment.

              ", + "smithy.api#documentation": "

              The name of the Amazon MWAA environment. For example, MyMWAAEnvironment.

              ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -402,14 +402,14 @@ "WebToken": { "target": "com.amazonaws.mwaa#Token", "traits": { - "smithy.api#documentation": "

              Create an Airflow Web UI login token response for the provided JWT token.

              ", + "smithy.api#documentation": "

              An Airflow web server login token.

              ", "smithy.api#sensitive": {} } }, "WebServerHostname": { "target": "com.amazonaws.mwaa#Hostname", "traits": { - "smithy.api#documentation": "

              Create an Airflow Web UI login token response for the provided webserver hostname.

              " + "smithy.api#documentation": "

              The Airflow web server hostname for the environment.

              " } } } @@ -472,20 +472,20 @@ "Name": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

              Internal only API.

              ", + "smithy.api#documentation": "

              \n Internal only. The name of the dimension.

              ", "smithy.api#required": {} } }, "Value": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

              Internal only API.

              ", + "smithy.api#documentation": "

              \n Internal only. The value of the dimension.

              ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. Represents the dimensions of a metric. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

              " } }, "com.amazonaws.mwaa#Dimensions": { @@ -530,7 +530,7 @@ "ExecutionRoleArn": { "target": "com.amazonaws.mwaa#IamRoleArn", "traits": { - "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access AWS resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              " + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              " } }, "ServiceRoleArn": { @@ -542,13 +542,13 @@ "KmsKey": { "target": "com.amazonaws.mwaa#KmsKey", "traits": { - "smithy.api#documentation": "

              The Key Management Service (KMS) encryption key used to encrypt the data in your environment.

              " + "smithy.api#documentation": "

              The Amazon Web Services Key Management Service (KMS) encryption key used to encrypt the data in your environment.

              " } }, "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

              The Apache Airflow version on your environment. For example, v1.10.12.

              " + "smithy.api#documentation": "

              The Apache Airflow version on your environment. Valid values: 1.10.12, 2.0.2.

              " } }, "SourceBucketArn": { @@ -606,27 +606,33 @@ } }, "NetworkConfiguration": { - "target": "com.amazonaws.mwaa#NetworkConfiguration" + "target": "com.amazonaws.mwaa#NetworkConfiguration", + "traits": { + "smithy.api#documentation": "

              Describes the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              " + } }, "LoggingConfiguration": { "target": "com.amazonaws.mwaa#LoggingConfiguration", "traits": { - "smithy.api#documentation": "

              The Apache Airflow logs being sent to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              " + "smithy.api#documentation": "

              The Apache Airflow logs published to CloudWatch Logs.

              " } }, "LastUpdate": { - "target": "com.amazonaws.mwaa#LastUpdate" + "target": "com.amazonaws.mwaa#LastUpdate", + "traits": { + "smithy.api#documentation": "

              The status of the last update on the environment.

              " + } }, "WeeklyMaintenanceWindowStart": { "target": "com.amazonaws.mwaa#WeeklyMaintenanceWindowStart", "traits": { - "smithy.api#documentation": "

              The day and time of the week that weekly maintenance updates are scheduled. For example: TUE:03:30.

              " + "smithy.api#documentation": "

              The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30.

              " } }, "Tags": { "target": "com.amazonaws.mwaa#TagMap", "traits": { - "smithy.api#documentation": "

              The key-value tag pairs associated to your environment. For example, \"Environment\": \"Staging\". To learn more, see Tagging AWS resources.

              " + "smithy.api#documentation": "

              The key-value tag pairs associated to your environment. For example, \"Environment\": \"Staging\". To learn more, see Tagging Amazon Web Services resources.

              " } }, "WebserverAccessMode": { @@ -649,7 +655,7 @@ } }, "traits": { - "smithy.api#documentation": "

              The Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              " + "smithy.api#documentation": "

              Describes an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              " } }, "com.amazonaws.mwaa#EnvironmentArn": { @@ -759,7 +765,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Retrieves the details of an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              ", + "smithy.api#documentation": "

              Describes an Amazon Managed Workflows for Apache Airflow (MWAA) environment.

              ", "smithy.api#endpoint": { "hostPrefix": "api." }, @@ -844,7 +850,7 @@ "Status": { "target": "com.amazonaws.mwaa#UpdateStatus", "traits": { - "smithy.api#documentation": "

              The status of the last update on the environment. Valid values: SUCCESS, PENDING, FAILED.

              " + "smithy.api#documentation": "

              The status of the last update on the environment.

              " } }, "CreatedAt": { @@ -858,10 +864,16 @@ "traits": { "smithy.api#documentation": "

              The error that was encountered during the last update of the environment.

              " } + }, + "Source": { + "target": "com.amazonaws.mwaa#UpdateSource", + "traits": { + "smithy.api#documentation": "

              The source of the last update to the environment. Includes internal processes by Amazon MWAA, such as an environment maintenance update.

              " + } } }, "traits": { - "smithy.api#documentation": "

              The status of the last update on the environment, and any errors that were encountered.

              " + "smithy.api#documentation": "

              Describes the status of the last update on the environment, and any errors that were encountered.

              " } }, "com.amazonaws.mwaa#ListEnvironments": { @@ -928,7 +940,7 @@ "Environments": { "target": "com.amazonaws.mwaa#EnvironmentList", "traits": { - "smithy.api#documentation": "

              Returns the list of Amazon MWAA environments.

              ", + "smithy.api#documentation": "

              Returns a list of Amazon MWAA environments.

              ", "smithy.api#required": {} } }, @@ -991,7 +1003,7 @@ "Tags": { "target": "com.amazonaws.mwaa#TagMap", "traits": { - "smithy.api#documentation": "

              The key-value tag pairs associated to your environment. To learn more, see Tagging AWS resources.

              " + "smithy.api#documentation": "

              The key-value tag pairs associated to your environment. To learn more, see Tagging Amazon Web Services resources.

              " } } } @@ -1000,46 +1012,76 @@ "type": "structure", "members": { "DagProcessingLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration" + "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration", + "traits": { + "smithy.api#documentation": "

              The Airflow DAG processing logs published to CloudWatch Logs and the log level.

              " + } }, "SchedulerLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration" + "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration", + "traits": { + "smithy.api#documentation": "

              The Airflow scheduler logs published to CloudWatch Logs and the log level.

              " + } }, "WebserverLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration" + "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration", + "traits": { + "smithy.api#documentation": "

              The Airflow web server logs published to CloudWatch Logs and the log level.

              " + } }, "WorkerLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration" + "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration", + "traits": { + "smithy.api#documentation": "

              The Airflow worker logs published to CloudWatch Logs and the log level.

              " + } }, "TaskLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration" + "target": "com.amazonaws.mwaa#ModuleLoggingConfiguration", + "traits": { + "smithy.api#documentation": "

              The Airflow task logs published to CloudWatch Logs and the log level.

              " + } } }, "traits": { - "smithy.api#documentation": "

              Defines the Apache Airflow logs to send to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              " + "smithy.api#documentation": "

              Describes the Apache Airflow log types that are published to CloudWatch Logs.

              " } }, "com.amazonaws.mwaa#LoggingConfigurationInput": { "type": "structure", "members": { "DagProcessingLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput" + "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput", + "traits": { + "smithy.api#documentation": "

              Publishes Airflow DAG processing logs to CloudWatch Logs.

              " + } }, "SchedulerLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput" + "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput", + "traits": { + "smithy.api#documentation": "

              Publishes Airflow scheduler logs to CloudWatch Logs.

              " + } }, "WebserverLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput" + "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput", + "traits": { + "smithy.api#documentation": "

              Publishes Airflow web server logs to CloudWatch Logs.

              " + } }, "WorkerLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput" + "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput", + "traits": { + "smithy.api#documentation": "

              Publishes Airflow worker logs to CloudWatch Logs.

              " + } }, "TaskLogs": { - "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput" + "target": "com.amazonaws.mwaa#ModuleLoggingConfigurationInput", + "traits": { + "smithy.api#documentation": "

              Publishes Airflow task logs to CloudWatch Logs.

              " + } } }, "traits": { - "smithy.api#documentation": "

              Defines the Apache Airflow logs to send to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              " + "smithy.api#documentation": "

              Defines the Apache Airflow log types to send to CloudWatch Logs.

              " } }, "com.amazonaws.mwaa#LoggingEnabled": { @@ -1096,41 +1138,44 @@ "MetricName": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "

              Internal only API.

              ", + "smithy.api#documentation": "

              \n Internal only. The name of the metric.

              ", "smithy.api#required": {} } }, "Timestamp": { "target": "smithy.api#Timestamp", "traits": { - "smithy.api#documentation": "

              Internal only API.

              ", + "smithy.api#documentation": "

              \n Internal only. The time the metric data was received.

              ", "smithy.api#required": {} } }, "Dimensions": { "target": "com.amazonaws.mwaa#Dimensions", "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. The dimensions associated with the metric.

              " } }, "Value": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. The value for the metric.

              " } }, "Unit": { - "target": "com.amazonaws.mwaa#Unit" + "target": "com.amazonaws.mwaa#Unit", + "traits": { + "smithy.api#documentation": "

              \n Internal only. The unit used to store the metric.

              " + } }, "StatisticValues": { "target": "com.amazonaws.mwaa#StatisticSet", "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. The statistical values for the metric.

              " } } }, "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. Collects Apache Airflow metrics. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

              " } }, "com.amazonaws.mwaa#MinWorkers": { @@ -1148,13 +1193,13 @@ "Enabled": { "target": "com.amazonaws.mwaa#LoggingEnabled", "traits": { - "smithy.api#documentation": "

              Indicates whether to enable the Apache Airflow log type (e.g. DagProcessingLogs) in CloudWatch Logs.

              " + "smithy.api#documentation": "

              Indicates whether the Apache Airflow log type (e.g. DagProcessingLogs) is enabled.

              " } }, "LogLevel": { "target": "com.amazonaws.mwaa#LoggingLevel", "traits": { - "smithy.api#documentation": "

              Defines the Apache Airflow logs to send for the log type (e.g. DagProcessingLogs) to CloudWatch Logs. Valid values: CRITICAL, ERROR, WARNING, INFO.

              " + "smithy.api#documentation": "

              The Apache Airflow log level for the log type (e.g. DagProcessingLogs).

              " } }, "CloudWatchLogGroupArn": { @@ -1165,7 +1210,7 @@ } }, "traits": { - "smithy.api#documentation": "

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              " + "smithy.api#documentation": "

              Describes the Apache Airflow log details for the log type (e.g. DagProcessingLogs).

              " } }, "com.amazonaws.mwaa#ModuleLoggingConfigurationInput": { @@ -1174,20 +1219,20 @@ "Enabled": { "target": "com.amazonaws.mwaa#LoggingEnabled", "traits": { - "smithy.api#documentation": "

              Indicates whether to enable the Apache Airflow log type (e.g. DagProcessingLogs) in CloudWatch Logs.

              ", + "smithy.api#documentation": "

              Indicates whether to enable the Apache Airflow log type (e.g. DagProcessingLogs).

              ", "smithy.api#required": {} } }, "LogLevel": { "target": "com.amazonaws.mwaa#LoggingLevel", "traits": { - "smithy.api#documentation": "

              Defines the Apache Airflow logs to send for the log type (e.g. DagProcessingLogs) to CloudWatch Logs. Valid values: CRITICAL, ERROR, WARNING, INFO.

              ", + "smithy.api#documentation": "

              Defines the Apache Airflow log level (e.g. INFO) to send to CloudWatch Logs.

              ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

              Defines the type of logs to send for the Apache Airflow log type (e.g. DagProcessingLogs). Valid values: CloudWatchLogGroupArn, Enabled, LogLevel.

              " + "smithy.api#documentation": "

              Enables the Apache Airflow log type (e.g. DagProcessingLogs) and defines the log level to send to CloudWatch Logs (e.g. INFO).

              " } }, "com.amazonaws.mwaa#NetworkConfiguration": { @@ -1196,18 +1241,18 @@ "SubnetIds": { "target": "com.amazonaws.mwaa#SubnetList", "traits": { - "smithy.api#documentation": "

              A list of 2 subnet IDs. Required to create an environment. Must be private subnets in two different availability zones. A subnet must be attached to the same VPC as the security group.

              " + "smithy.api#documentation": "

              A list of subnet IDs. To learn more, see About networking on Amazon MWAA.

              " } }, "SecurityGroupIds": { "target": "com.amazonaws.mwaa#SecurityGroupList", "traits": { - "smithy.api#documentation": "

              A list of 1 or more security group IDs. Accepts up to 5 security group IDs. A security group must be attached to the same VPC as the subnets. To learn more, see Security in your VPC on Amazon MWAA.

              " + "smithy.api#documentation": "

              A list of security group IDs. To learn more, see Security in your VPC on Amazon MWAA.

              " } } }, "traits": { - "smithy.api#documentation": "

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              " + "smithy.api#documentation": "

              Describes the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              " } }, "com.amazonaws.mwaa#NextToken": { @@ -1235,7 +1280,7 @@ } ], "traits": { - "smithy.api#documentation": "An operation for publishing metrics from the customers to the Ops plane.", + "smithy.api#documentation": "

              \n Internal only. Publishes environment health metrics to Amazon CloudWatch.

              ", "smithy.api#endpoint": { "hostPrefix": "ops." }, @@ -1252,7 +1297,7 @@ "EnvironmentName": { "target": "com.amazonaws.mwaa#EnvironmentName", "traits": { - "smithy.api#documentation": "

              Publishes environment metric data to Amazon CloudWatch.

              ", + "smithy.api#documentation": "

              \n Internal only. The name of the environment.

              ", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1260,7 +1305,7 @@ "MetricData": { "target": "com.amazonaws.mwaa#MetricData", "traits": { - "smithy.api#documentation": "

              Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metrica.

              ", + "smithy.api#documentation": "

              \n Internal only. Publishes metrics to Amazon CloudWatch. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

              ", "smithy.api#required": {} } } @@ -1349,30 +1394,30 @@ "SampleCount": { "target": "smithy.api#Integer", "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. The number of samples used for the statistic set.

              " } }, "Sum": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. The sum of values for the sample set.

              " } }, "Minimum": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. The minimum value of the sample set.

              " } }, "Maximum": { "target": "smithy.api#Double", "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. The maximum value of the sample set.

              " } } }, "traits": { - "smithy.api#documentation": "

              Internal only API.

              " + "smithy.api#documentation": "

              \n Internal only. Represents a set of statistics that describe a specific metric. To learn more about the metrics published to Amazon CloudWatch, see Amazon MWAA performance metrics in Amazon CloudWatch.

              " } }, "com.amazonaws.mwaa#SubnetId": { @@ -1478,7 +1523,7 @@ "Tags": { "target": "com.amazonaws.mwaa#TagMap", "traits": { - "smithy.api#documentation": "

              The key-value tag pairs you want to associate to your environment. For example, \"Environment\": \"Staging\". To learn more, see Tagging AWS resources.

              ", + "smithy.api#documentation": "

              The key-value tag pairs you want to associate to your environment. For example, \"Environment\": \"Staging\". To learn more, see Tagging Amazon Web Services resources.

              ", "smithy.api#required": {} } } @@ -1504,7 +1549,6 @@ "com.amazonaws.mwaa#Unit": { "type": "string", "traits": { - "smithy.api#documentation": "Unit", "smithy.api#enum": [ { "value": "Seconds", @@ -1722,13 +1766,13 @@ "ExecutionRoleArn": { "target": "com.amazonaws.mwaa#IamRoleArn", "traits": { - "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access AWS resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              " + "smithy.api#documentation": "

              The Amazon Resource Name (ARN) of the execution role in IAM that allows MWAA to access Amazon Web Services resources in your environment. For example, arn:aws:iam::123456789:role/my-execution-role. To learn more, see Amazon MWAA Execution role.

              " } }, "AirflowVersion": { "target": "com.amazonaws.mwaa#AirflowVersion", "traits": { - "smithy.api#documentation": "

              The Apache Airflow version for your environment. For example, v1.10.12. If no value is specified, defaults to the latest version. Valid values: v1.10.12.

              " + "smithy.api#documentation": "

              The Apache Airflow version for your environment. If no value is specified, defaults to the latest version. Valid values: 1.10.12, 2.0.2.

              " } }, "SourceBucketArn": { @@ -1789,19 +1833,19 @@ "NetworkConfiguration": { "target": "com.amazonaws.mwaa#UpdateNetworkConfigurationInput", "traits": { - "smithy.api#documentation": "

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              " + "smithy.api#documentation": "

              The VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              " } }, "LoggingConfiguration": { "target": "com.amazonaws.mwaa#LoggingConfigurationInput", "traits": { - "smithy.api#documentation": "

              Defines the Apache Airflow logs to send to CloudWatch Logs: DagProcessingLogs, SchedulerLogs, TaskLogs, WebserverLogs, WorkerLogs.

              " + "smithy.api#documentation": "

              The Apache Airflow log types to send to CloudWatch Logs.

              " } }, "WeeklyMaintenanceWindowStart": { "target": "com.amazonaws.mwaa#WeeklyMaintenanceWindowStart", "traits": { - "smithy.api#documentation": "

              The day and time of the week to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only. Supported input includes the following:

              • MON|TUE|WED|THU|FRI|SAT|SUN:([01]\\\\d|2[0-3]):(00|30)
              " + "smithy.api#documentation": "

              The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time to start weekly maintenance updates of your environment in the following format: DAY:HH:MM. For example: TUE:03:30. You can specify a start time in 30 minute increments only.

              " } }, "WebserverAccessMode": { @@ -1852,7 +1896,7 @@ } }, "traits": { - "smithy.api#documentation": "

              An object containing the error encountered with the last update: ErrorCode, ErrorMessage.

              " + "smithy.api#documentation": "

              Describes the error(s) encountered with the last update of the environment.

              " } }, "com.amazonaws.mwaa#UpdateNetworkConfigurationInput": { @@ -1861,13 +1905,23 @@ "SecurityGroupIds": { "target": "com.amazonaws.mwaa#SecurityGroupList", "traits": { - "smithy.api#documentation": "

              A list of 1 or more security group IDs. Accepts up to 5 security group IDs. A security group must be attached to the same VPC as the subnets. To learn more, see Security in your VPC on Amazon MWAA.

              ", + "smithy.api#documentation": "

              A list of security group IDs. A security group must be attached to the same VPC as the subnets. To learn more, see Security in your VPC on Amazon MWAA.

              ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

              The VPC networking components used to secure and enable network traffic between the AWS resources for your environment. To learn more, see About networking on Amazon MWAA.

              " + "smithy.api#documentation": "

              Defines the VPC networking components used to secure and enable network traffic between the Amazon Web Services resources for your environment. To learn more, see About networking on Amazon MWAA.

              " + } + }, + "com.amazonaws.mwaa#UpdateSource": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^.+$" } }, "com.amazonaws.mwaa#UpdateStatus": { diff --git a/codegen/sdk-codegen/aws-models/opensearch.json b/codegen/sdk-codegen/aws-models/opensearch.json index a065b6a8343e..16df6034e34e 100644 --- a/codegen/sdk-codegen/aws-models/opensearch.json +++ b/codegen/sdk-codegen/aws-models/opensearch.json @@ -283,6 +283,18 @@ "traits": { "smithy.api#documentation": "

              Describes the SAML application configured for a domain.

              " } + }, + "AnonymousAuthDisableDate": { + "target": "com.amazonaws.opensearch#DisableTimestamp", + "traits": { + "smithy.api#documentation": "

              Specifies the Anonymous Auth Disable Date when Anonymous Auth is enabled.

              " + } + }, + "AnonymousAuthEnabled": { + "target": "com.amazonaws.opensearch#Boolean", + "traits": { + "smithy.api#documentation": "

              True if Anonymous auth is enabled. Anonymous auth can be enabled only when AdvancedSecurity is enabled on existing domains.

              " + } } }, "traits": { @@ -315,6 +327,12 @@ "traits": { "smithy.api#documentation": "

              The SAML application configuration for the domain.

              " } + }, + "AnonymousAuthEnabled": { + "target": "com.amazonaws.opensearch#Boolean", + "traits": { + "smithy.api#documentation": "

              True if Anonymous auth is enabled. Anonymous auth can be enabled only when AdvancedSecurity is enabled on existing domains.

              " + } } }, "traits": { @@ -345,6 +363,24 @@ }, "com.amazonaws.opensearch#AmazonOpenSearchService": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "OpenSearch", + "arnNamespace": "es", + "cloudFormationName": "OpenSearch", + "cloudTrailEventSource": "opensearch.amazonaws.com", + "endpointPrefix": "es" + }, + "aws.auth#sigv4": { + "name": "es" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "Amazon OpenSearch Configuration Service\n

              Use the Amazon OpenSearch configuration API to create, configure, and manage Amazon OpenSearch Service domains.

              \n

              For sample code that uses the configuration API, see the \n Amazon OpenSearch Service Developer Guide.\n The guide also contains \n sample\n code for sending signed HTTP requests to the OpenSearch APIs.\n

              \n

              The endpoint for configuration service requests is region-specific: es.region.amazonaws.com.\n For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints,\n see Regions and Endpoints.\n

              ", + "smithy.api#title": "Amazon OpenSearch Service", + "smithy.api#xmlNamespace": { + "uri": "http://es.amazonaws.com/doc/2021-01-01/" + } + }, "version": "2021-01-01", "operations": [ { @@ -464,25 +500,7 @@ { "target": "com.amazonaws.opensearch#UpgradeDomain" } - ], - "traits": { - "aws.api#service": { - "sdkId": "OpenSearch", - "arnNamespace": "es", - "cloudFormationName": "OpenSearch", - "cloudTrailEventSource": "opensearch.amazonaws.com", - "endpointPrefix": "es" - }, - "aws.auth#sigv4": { - "name": "es" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon OpenSearch Configuration Service\n

              Use the Amazon OpenSearch configuration API to create, configure, and manage Amazon OpenSearch Service domains.

              \n

              For sample code that uses the configuration API, see the \n Amazon OpenSearch Service Developer Guide.\n The guide also contains \n sample\n code for sending signed HTTP requests to the OpenSearch APIs.\n

              \n

              The endpoint for configuration service requests is region-specific: es.region.amazonaws.com.\n For example, es.us-east-1.amazonaws.com. For a current list of supported regions and endpoints,\n see Regions and Endpoints.\n

              ", - "smithy.api#title": "Amazon OpenSearch Service", - "smithy.api#xmlNamespace": { - "uri": "http://es.amazonaws.com/doc/2021-01-01/" - } - } + ] }, "com.amazonaws.opensearch#AssociatePackage": { "type": "operation", @@ -2614,6 +2632,9 @@ "smithy.api#documentation": "

              Container for results from\n DescribeReservedInstances\n

              " } }, + "com.amazonaws.opensearch#DisableTimestamp": { + "type": "timestamp" + }, "com.amazonaws.opensearch#DisabledOperationException": { "type": "structure", "members": { diff --git a/codegen/sdk-codegen/aws-models/quicksight.json b/codegen/sdk-codegen/aws-models/quicksight.json index 879784840490..1e56f145f8b6 100644 --- a/codegen/sdk-codegen/aws-models/quicksight.json +++ b/codegen/sdk-codegen/aws-models/quicksight.json @@ -1681,7 +1681,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Creates a dataset.

              ", + "smithy.api#documentation": "

              Creates a dataset. This operation doesn't support datasets that include uploaded files as a source.

              ", "smithy.api#http": { "method": "POST", "uri": "/accounts/{AwsAccountId}/data-sets", @@ -2580,7 +2580,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Creates and starts a new SPICE ingestion on a dataset

              \n\t\t\n\t\t

              Any ingestions operating on tagged datasets inherit the same tags automatically for use in\n\t\t\taccess control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using\n\t\t\t\ttags? in the Amazon Web Services Knowledge Center. Tags are visible on the tagged dataset, but not on the ingestion resource.

              ", + "smithy.api#documentation": "

              Creates and starts a new SPICE ingestion for a dataset. You can manually refresh datasets in\n\t\t\tan Enterprise edition account 32 times in a 24-hour period. You can manually refresh\n\t\t\tdatasets in a Standard edition account 8 times in a 24-hour period. Each 24-hour period\n\t\t\tis measured starting 24 hours before the current date and time.

              \n\t\t\n\t\t

              Any ingestions operating on tagged datasets inherit the same tags automatically for use in\n\t\t\taccess control. For an example, see How do I create an IAM policy to control access to Amazon EC2 resources using\n\t\t\t\ttags? in the Amazon Web Services Knowledge Center. Tags are visible on the tagged dataset, but not on the ingestion resource.

              ", "smithy.api#http": { "method": "PUT", "uri": "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}", @@ -6800,7 +6800,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Describes a dataset.

              ", + "smithy.api#documentation": "

              Describes a dataset. This operation doesn't support datasets that include uploaded files as a source.

              ", "smithy.api#http": { "method": "GET", "uri": "/accounts/{AwsAccountId}/data-sets/{DataSetId}", @@ -17602,7 +17602,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Updates a dataset.

              ", + "smithy.api#documentation": "

              Updates a dataset. This operation doesn't support datasets that include uploaded files as a source.

              ", "smithy.api#http": { "method": "PUT", "uri": "/accounts/{AwsAccountId}/data-sets/{DataSetId}", diff --git a/codegen/sdk-codegen/aws-models/rds.json b/codegen/sdk-codegen/aws-models/rds.json index fd4e96e57bb5..2c6e81efecc4 100644 --- a/codegen/sdk-codegen/aws-models/rds.json +++ b/codegen/sdk-codegen/aws-models/rds.json @@ -1900,7 +1900,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database engine and specific\n AMI. The only supported engine is Oracle Database 19c Enterprise Edition with the January 2021 or later\n RU/RUR.

              \n

              Amazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software.\n The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create\n your database. With Amazon RDS Custom, you upload your database installation files in Amazon S3.

              \n

              When you create a custom engine version, you specify the files in a JSON document called a CEV manifest. \n This document describes installation .zip files stored in Amazon S3. RDS Custom creates your CEV from \n the installation files that you provided. This service model is called Bring Your Own Media (BYOM).

              \n

              Creation takes approximately two hours. If creation fails, RDS Custom issues RDS-EVENT-0196 with \n the message Creation failed for custom engine version, and includes details about the failure. \n For example, the event prints missing files.

              \n

              After you create the CEV, it is available for use. You can create multiple CEVs, and create multiple \n RDS Custom instances from any CEV. You can also change the status of a CEV to make it available or\n inactive.

              \n \n

              The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with \n Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the \n CreateCustomDbEngineVersion event aren't logged. However, you might see calls from the \n API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for \n the CreateCustomDbEngineVersion event.

              \n
              \n

              For more information, see \n Creating a CEV in the Amazon RDS User Guide.

              " + "smithy.api#documentation": "

              Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database engine and specific\n AMI. The only supported engine is Oracle Database 19c Enterprise Edition with the January 2021 or later\n RU/RUR.

              \n

              Amazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software.\n The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create\n your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.

              \n

              When you create a custom engine version, you specify the files in a JSON document called a CEV manifest. \n This document describes installation .zip files stored in Amazon S3. RDS Custom creates your CEV from \n the installation files that you provided. This service model is called Bring Your Own Media (BYOM).

              \n

              Creation takes approximately two hours. If creation fails, RDS Custom issues RDS-EVENT-0196 with \n the message Creation failed for custom engine version, and includes details about the failure. \n For example, the event prints missing files.

              \n

              After you create the CEV, it is available for use. You can create multiple CEVs, and create multiple \n RDS Custom instances from any CEV. You can also change the status of a CEV to make it available or\n inactive.

              \n \n

              The MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with \n Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the \n CreateCustomDbEngineVersion event aren't logged. However, you might see calls from the \n API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for \n the CreateCustomDbEngineVersion event.

              \n
              \n

              For more information, see \n Creating a CEV in the Amazon RDS User Guide.

              " } }, "com.amazonaws.rds#CreateCustomDBEngineVersionMessage": { @@ -1916,7 +1916,7 @@ "EngineVersion": { "target": "com.amazonaws.rds#CustomEngineVersion", "traits": { - "smithy.api#documentation": "

              The name of your CEV. The name format is 19.customized_string\n . For example, \n a valid name is 19.my_cev1. This setting is required for RDS Custom, but optional for Amazon RDS. \n The combination of Engine and EngineVersion is unique per customer per Region.

              ", + "smithy.api#documentation": "

              The name of your CEV. The name format is 19.customized_string\n . For example, \n a valid name is 19.my_cev1. This setting is required for RDS Custom for Oracle, but optional for Amazon RDS. \n The combination of Engine and EngineVersion is unique per customer per Region.

              ", "smithy.api#required": {} } }, @@ -2157,14 +2157,14 @@ "Engine": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              The name of the database engine to be used for this DB cluster.

              \n

              Valid Values:

              • aurora (for MySQL 5.6-compatible Aurora)
              • aurora-mysql (for MySQL 5.7-compatible Aurora)
              • aurora-postgresql
              • mysql
              • postgres

              Valid for: Aurora DB clusters and Multi-AZ DB clusters

              ", + "smithy.api#documentation": "

              The name of the database engine to be used for this DB cluster.

              \n

              Valid Values:

              • aurora (for MySQL 5.6-compatible Aurora)
              • aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)
              • aurora-postgresql
              • mysql
              • postgres

              Valid for: Aurora DB clusters and Multi-AZ DB clusters

              ", "smithy.api#required": {} } }, "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              The version number of the database engine to use.

              \n

              To list all of the available engine versions for MySQL 5.6-compatible Aurora, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for MySQL 5.7-compatible Aurora, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for Aurora PostgreSQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for RDS for MySQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for RDS for PostgreSQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              \n Aurora MySQL\n

              \n

              For information, see MySQL on Amazon RDS Versions in the \n Amazon Aurora User Guide.\n

              \n

              \n Aurora PostgreSQL\n

              \n

              For information, see Amazon Aurora PostgreSQL releases and engine versions in the \n Amazon Aurora User Guide.\n

              \n

              \n MySQL\n

              \n

              For information, see MySQL on Amazon RDS Versions in the \n Amazon RDS User Guide.\n

              \n

              \n PostgreSQL\n

              \n

              For information, see Amazon RDS for PostgreSQL versions and extensions in the \n Amazon RDS User Guide.\n

              \n

              Valid for: Aurora DB clusters and Multi-AZ DB clusters

              " + "smithy.api#documentation": "

              The version number of the database engine to use.

              \n

              To list all of the available engine versions for MySQL 5.6-compatible Aurora, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for Aurora PostgreSQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for RDS for MySQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for RDS for PostgreSQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              \n Aurora MySQL\n

              \n

              For information, see MySQL on Amazon RDS Versions in the \n Amazon Aurora User Guide.\n

              \n

              \n Aurora PostgreSQL\n

              \n

              For information, see Amazon Aurora PostgreSQL releases and engine versions in the \n Amazon Aurora User Guide.\n

              \n

              \n MySQL\n

              \n

              For information, see MySQL on Amazon RDS Versions in the \n Amazon RDS User Guide.\n

              \n

              \n PostgreSQL\n

              \n

              For information, see Amazon RDS for PostgreSQL versions and extensions in the \n Amazon RDS User Guide.\n

              \n

              Valid for: Aurora DB clusters and Multi-AZ DB clusters

              " } }, "Port": { @@ -2320,7 +2320,7 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              Specifies the storage type to be associated with the DB cluster.

              \n

              This setting is required to create a Multi-AZ DB cluster.

              \n

              \n Valid values: standard | gp2 | io1\n

              \n

              \n If you specify io1, also include a value for the\n Iops parameter.\n

              \n

              \n Default: io1 if the Iops parameter\n is specified, otherwise gp2\n

              \n

              Valid for: Multi-AZ DB clusters only

              " + "smithy.api#documentation": "

              Specifies the storage type to be associated with the DB cluster.

              \n

              This setting is required to create a Multi-AZ DB cluster.

              \n

              \n Valid values: io1\n

              \n

              \n When specified, a value for the Iops parameter is required.\n

              \n

              \n Default: io1\n

              \n

              Valid for: Multi-AZ DB clusters only

              " } }, "Iops": { @@ -2409,7 +2409,7 @@ "DBParameterGroupFamily": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster \n parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

              \n

              \n Aurora MySQL\n

              \n

              Example: aurora5.6, aurora-mysql5.7\n

              \n

              \n Aurora PostgreSQL\n

              \n

              Example: aurora-postgresql9.6\n

              \n

              \n RDS for MySQL\n

              \n

              Example: mysql8.0\n

              \n

              \n RDS for PostgreSQL\n

              \n

              Example: postgres12\n

              \n

              To list all of the available parameter group families for a DB engine, use the following command:

              \n

              \n aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine \n

              \n

              For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:

              \n

              \n aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql\n

              \n \n

              The output contains duplicates.

              \n
              \n

              The following are the valid DB engine values:

              • aurora (for MySQL 5.6-compatible Aurora)
              • aurora-mysql (for MySQL 5.7-compatible Aurora)
              • aurora-postgresql
              • mysql
              • postgres
              ", + "smithy.api#documentation": "

              The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster \n parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.

              \n

              \n Aurora MySQL\n

              \n

              Example: aurora5.6, aurora-mysql5.7, aurora-mysql8.0\n

              \n

              \n Aurora PostgreSQL\n

              \n

              Example: aurora-postgresql9.6\n

              \n

              \n RDS for MySQL\n

              \n

              Example: mysql8.0\n

              \n

              \n RDS for PostgreSQL\n

              \n

              Example: postgres12\n

              \n

              To list all of the available parameter group families for a DB engine, use the following command:

              \n

              \n aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine \n

              \n

              For example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:

              \n

              \n aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql\n

              \n \n

              The output contains duplicates.

              \n
              \n

              The following are the valid DB engine values:

              • aurora (for MySQL 5.6-compatible Aurora)
              • aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)
              • aurora-postgresql
              • mysql
              • postgres
              ", "smithy.api#required": {} } }, @@ -2589,7 +2589,7 @@ "DBName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

@@ -2589,7 +2589,7 @@
        "DBName": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The meaning of this parameter differs according to the database engine you use.</p> <p><b>MySQL</b></p> <p>The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.</p> <p>Constraints:</p> <ul> <li>Must contain 1 to 64 letters or numbers.</li> <li>Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).</li> <li>Can't be a word reserved by the specified database engine</li> </ul> <p><b>MariaDB</b></p> <p>The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.</p> <p>Constraints:</p> <ul> <li>Must contain 1 to 64 letters or numbers.</li> <li>Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).</li> <li>Can't be a word reserved by the specified database engine</li> </ul> <p><b>PostgreSQL</b></p> <p>The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named <code>postgres</code> is created in the DB instance.</p> <p>Constraints:</p> <ul> <li>Must contain 1 to 63 letters, numbers, or underscores.</li> <li>Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).</li> <li>Can't be a word reserved by the specified database engine</li> </ul> <p><b>Oracle</b></p> <p>The Oracle System ID (SID) of the created DB instance. If you specify <code>null</code>, the default value <code>ORCL</code> is used. You can't specify the string <code>NULL</code>, or any other reserved word, for <code>DBName</code>.</p> <p>Default: <code>ORCL</code></p> <p>Constraints:</p> <ul> <li>Can't be longer than 8 characters</li> </ul> <p><b>Amazon RDS Custom</b></p> <p>The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is <code>ORCL</code>.</p> <p>Default: <code>ORCL</code></p> <p>Constraints:</p> <ul> <li>It must contain 1 to 8 alphanumeric characters.</li> <li>It must contain a letter.</li> <li>It can't be a word reserved by the database engine.</li> </ul> <p><b>SQL Server</b></p> <p>Not applicable. Must be null.</p> <p><b>Amazon Aurora MySQL</b></p> <p>The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.</p> <p>Constraints:</p> <ul> <li>It must contain 1 to 64 alphanumeric characters.</li> <li>It can't be a word reserved by the database engine.</li> </ul> <p><b>Amazon Aurora PostgreSQL</b></p> <p>The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named <code>postgres</code> is created in the DB cluster.</p> <p>Constraints:</p> <ul> <li>It must contain 1 to 63 alphanumeric characters.</li> <li>It must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0 to 9).</li> <li>It can't be a word reserved by the database engine.</li> </ul>"
+            "smithy.api#documentation": "<p>The meaning of this parameter differs according to the database engine you use.</p> <p><b>MySQL</b></p> <p>The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.</p> <p>Constraints:</p> <ul> <li>Must contain 1 to 64 letters or numbers.</li> <li>Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).</li> <li>Can't be a word reserved by the specified database engine</li> </ul> <p><b>MariaDB</b></p> <p>The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.</p> <p>Constraints:</p> <ul> <li>Must contain 1 to 64 letters or numbers.</li> <li>Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).</li> <li>Can't be a word reserved by the specified database engine</li> </ul> <p><b>PostgreSQL</b></p> <p>The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named <code>postgres</code> is created in the DB instance.</p> <p>Constraints:</p> <ul> <li>Must contain 1 to 63 letters, numbers, or underscores.</li> <li>Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).</li> <li>Can't be a word reserved by the specified database engine</li> </ul> <p><b>Oracle</b></p> <p>The Oracle System ID (SID) of the created DB instance. If you specify <code>null</code>, the default value <code>ORCL</code> is used. You can't specify the string <code>NULL</code>, or any other reserved word, for <code>DBName</code>.</p> <p>Default: <code>ORCL</code></p> <p>Constraints:</p> <ul> <li>Can't be longer than 8 characters</li> </ul> <p><b>Amazon RDS Custom for Oracle</b></p> <p>The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is <code>ORCL</code>.</p> <p>Default: <code>ORCL</code></p> <p>Constraints:</p> <ul> <li>It must contain 1 to 8 alphanumeric characters.</li> <li>It must contain a letter.</li> <li>It can't be a word reserved by the database engine.</li> </ul> <p><b>Amazon RDS Custom for SQL Server</b></p> <p>Not applicable. Must be null.</p> <p><b>SQL Server</b></p> <p>Not applicable. Must be null.</p> <p><b>Amazon Aurora MySQL</b></p> <p>The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster.</p> <p>Constraints:</p> <ul> <li>It must contain 1 to 64 alphanumeric characters.</li> <li>It can't be a word reserved by the database engine.</li> </ul> <p><b>Amazon Aurora PostgreSQL</b></p> <p>The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named <code>postgres</code> is created in the DB cluster.</p> <p>Constraints:</p> <ul> <li>It must contain 1 to 63 alphanumeric characters.</li> <li>It must begin with a letter or an underscore. Subsequent characters can be letters, underscores, or digits (0 to 9).</li> <li>It can't be a word reserved by the database engine.</li> </ul>"
          }
        },
        "DBInstanceIdentifier": {

@@ -2602,7 +2602,7 @@
        "AllocatedStorage": {
          "target": "com.amazonaws.rds#IntegerOptional",
          "traits": {
-            "smithy.api#documentation": "<p>The amount of storage in gibibytes (GiB) to allocate for the DB instance.</p> <p>Type: Integer</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.</p> <p><b>Amazon RDS Custom</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 40 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 40 to 65536.</li> </ul> <p><b>MySQL</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.</li> <li>Magnetic storage (standard): Must be an integer from 5 to 3072.</li> </ul> <p><b>MariaDB</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.</li> <li>Magnetic storage (standard): Must be an integer from 5 to 3072.</li> </ul> <p><b>PostgreSQL</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.</li> <li>Magnetic storage (standard): Must be an integer from 5 to 3072.</li> </ul> <p><b>Oracle</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.</li> <li>Magnetic storage (standard): Must be an integer from 10 to 3072.</li> </ul> <p><b>SQL Server</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): <ul> <li>Enterprise and Standard editions: Must be an integer from 200 to 16384.</li> <li>Web and Express editions: Must be an integer from 20 to 16384.</li> </ul> </li> <li>Provisioned IOPS storage (io1): <ul> <li>Enterprise and Standard editions: Must be an integer from 200 to 16384.</li> <li>Web and Express editions: Must be an integer from 100 to 16384.</li> </ul> </li> <li>Magnetic storage (standard): <ul> <li>Enterprise and Standard editions: Must be an integer from 200 to 1024.</li> <li>Web and Express editions: Must be an integer from 20 to 1024.</li> </ul> </li> </ul>"
+            "smithy.api#documentation": "<p>The amount of storage in gibibytes (GiB) to allocate for the DB instance.</p> <p>Type: Integer</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. Aurora cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in an Aurora cluster volume.</p> <p><b>Amazon RDS Custom</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 40 to 65536 for RDS Custom for Oracle, 16384 for RDS Custom for SQL Server.</li> </ul> <p><b>MySQL</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.</li> <li>Magnetic storage (standard): Must be an integer from 5 to 3072.</li> </ul> <p><b>MariaDB</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.</li> <li>Magnetic storage (standard): Must be an integer from 5 to 3072.</li> </ul> <p><b>PostgreSQL</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.</li> <li>Magnetic storage (standard): Must be an integer from 5 to 3072.</li> </ul> <p><b>Oracle</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): Must be an integer from 20 to 65536.</li> <li>Provisioned IOPS storage (io1): Must be an integer from 100 to 65536.</li> <li>Magnetic storage (standard): Must be an integer from 10 to 3072.</li> </ul> <p><b>SQL Server</b></p> <p>Constraints to the amount of storage for each storage type are the following:</p> <ul> <li>General Purpose (SSD) storage (gp2): <ul> <li>Enterprise and Standard editions: Must be an integer from 20 to 16384.</li> <li>Web and Express editions: Must be an integer from 20 to 16384.</li> </ul> </li> <li>Provisioned IOPS storage (io1): <ul> <li>Enterprise and Standard editions: Must be an integer from 100 to 16384.</li> <li>Web and Express editions: Must be an integer from 100 to 16384.</li> </ul> </li> <li>Magnetic storage (standard): <ul> <li>Enterprise and Standard editions: Must be an integer from 20 to 1024.</li> <li>Web and Express editions: Must be an integer from 20 to 1024.</li> </ul> </li> </ul>"
          }
        },
        "DBInstanceClass": {

@@ -2615,7 +2615,7 @@
        "Engine": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The name of the database engine to be used for this instance.</p> <p>Not every database engine is available for every Amazon Web Services Region.</p> <p>Valid Values:</p> <ul> <li><code>aurora</code> (for MySQL 5.6-compatible Aurora)</li> <li><code>aurora-mysql</code> (for MySQL 5.7-compatible Aurora)</li> <li><code>aurora-postgresql</code></li> <li><code>custom-oracle-ee</code> (for RDS Custom instances)</li> <li><code>mariadb</code></li> <li><code>mysql</code></li> <li><code>oracle-ee</code></li> <li><code>oracle-ee-cdb</code></li> <li><code>oracle-se2</code></li> <li><code>oracle-se2-cdb</code></li> <li><code>postgres</code></li> <li><code>sqlserver-ee</code></li> <li><code>sqlserver-se</code></li> <li><code>sqlserver-ex</code></li> <li><code>sqlserver-web</code></li> </ul>",
+            "smithy.api#documentation": "<p>The name of the database engine to be used for this instance.</p> <p>Not every database engine is available for every Amazon Web Services Region.</p> <p>Valid Values:</p> <ul> <li><code>aurora</code> (for MySQL 5.6-compatible Aurora)</li> <li><code>aurora-mysql</code> (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)</li> <li><code>aurora-postgresql</code></li> <li><code>custom-oracle-ee</code> (for RDS Custom for Oracle instances)</li> <li><code>custom-sqlserver-ee</code> (for RDS Custom for SQL Server instances)</li> <li><code>custom-sqlserver-se</code> (for RDS Custom for SQL Server instances)</li> <li><code>custom-sqlserver-web</code> (for RDS Custom for SQL Server instances)</li> <li><code>mariadb</code></li> <li><code>mysql</code></li> <li><code>oracle-ee</code></li> <li><code>oracle-ee-cdb</code></li> <li><code>oracle-se2</code></li> <li><code>oracle-se2-cdb</code></li> <li><code>postgres</code></li> <li><code>sqlserver-ee</code></li> <li><code>sqlserver-se</code></li> <li><code>sqlserver-ex</code></li> <li><code>sqlserver-web</code></li> </ul>",
            "smithy.api#required": {}
          }
        },

@@ -2646,7 +2646,7 @@
        "AvailabilityZone": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones.</p> <p>Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.</p> <p>Example: <code>us-east-1d</code></p> <p>Constraint: The <code>AvailabilityZone</code> parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint.</p> <p>If you're creating a DB instance in an RDS on VMware environment, specify the identifier of the custom Availability Zone to create the DB instance in.</p> <p>For more information about RDS on VMware, see the RDS on VMware User Guide.</p>"
+            "smithy.api#documentation": "<p>The Availability Zone (AZ) where the database will be created. For information on Amazon Web Services Regions and Availability Zones, see Regions and Availability Zones.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. Availability Zones are managed by the DB cluster.</p> <p>Default: A random, system-chosen Availability Zone in the endpoint's Amazon Web Services Region.</p> <p>Example: <code>us-east-1d</code></p> <p>Constraint: The <code>AvailabilityZone</code> parameter can't be specified if the DB instance is a Multi-AZ deployment. The specified Availability Zone must be in the same Amazon Web Services Region as the current endpoint.</p> <p>If you're creating a DB instance in an RDS on VMware environment, specify the identifier of the custom Availability Zone to create the DB instance in.</p> <p>For more information about RDS on VMware, see the RDS on VMware User Guide.</p>"
          }
        },
        "DBSubnetGroupName": {

@@ -2670,7 +2670,7 @@
        "BackupRetentionPeriod": {
          "target": "com.amazonaws.rds#IntegerOptional",
          "traits": {
-            "smithy.api#documentation": "<p>The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The retention period for automated backups is managed by the DB cluster.</p> <p>Default: 1</p> <p>Constraints:</p> <ul> <li>Must be a value from 0 to 35</li> <li>Can't be set to 0 if the DB instance is a source to read replicas</li> <li>Can't be set to 0 or 35 for an RDS Custom DB instance</li> </ul>"
+            "smithy.api#documentation": "<p>The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The retention period for automated backups is managed by the DB cluster.</p> <p>Default: 1</p> <p>Constraints:</p> <ul> <li>Must be a value from 0 to 35</li> <li>Can't be set to 0 if the DB instance is a source to read replicas</li> <li>Can't be set to 0 or 35 for an RDS Custom for Oracle DB instance</li> </ul>"
          }
        },
        "PreferredBackupWindow": {

@@ -2694,7 +2694,7 @@
        "EngineVersion": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The version number of the database engine to use.</p> <p>For a list of valid engine versions, use the DescribeDBEngineVersions action.</p> <p>The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.</p> <p><b>Amazon RDS Custom</b></p> <p>A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom. The CEV name has the following format: <code>19.customized_string</code>. An example identifier is <code>19.my_cev1</code>. For more information, see Creating an RDS Custom DB instance in the Amazon RDS User Guide.</p> <p><b>MariaDB</b></p> <p>For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.</p> <p><b>Microsoft SQL Server</b></p> <p>For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.</p> <p><b>MySQL</b></p> <p>For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.</p> <p><b>Oracle</b></p> <p>For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.</p> <p><b>PostgreSQL</b></p> <p>For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.</p>"
+            "smithy.api#documentation": "<p>The version number of the database engine to use.</p> <p>For a list of valid engine versions, use the DescribeDBEngineVersions action.</p> <p>The following are the database engines and links to information about the major and minor versions that are available with Amazon RDS. Not every database engine is available for every Amazon Web Services Region.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The version number of the database engine to be used by the DB instance is managed by the DB cluster.</p> <p><b>Amazon RDS Custom for Oracle</b></p> <p>A custom engine version (CEV) that you have previously created. This setting is required for RDS Custom for Oracle. The CEV name has the following format: <code>19.customized_string</code>. An example identifier is <code>19.my_cev1</code>. For more information, see Creating an RDS Custom for Oracle DB instance in the Amazon RDS User Guide.</p> <p><b>Amazon RDS Custom for SQL Server</b></p> <p>See RDS Custom for SQL Server general requirements in the Amazon RDS User Guide.</p> <p><b>MariaDB</b></p> <p>For information, see MariaDB on Amazon RDS Versions in the Amazon RDS User Guide.</p> <p><b>Microsoft SQL Server</b></p> <p>For information, see Microsoft SQL Server Versions on Amazon RDS in the Amazon RDS User Guide.</p> <p><b>MySQL</b></p> <p>For information, see MySQL on Amazon RDS Versions in the Amazon RDS User Guide.</p> <p><b>Oracle</b></p> <p>For information, see Oracle Database Engine Release Notes in the Amazon RDS User Guide.</p> <p><b>PostgreSQL</b></p> <p>For information, see Amazon RDS for PostgreSQL versions and extensions in the Amazon RDS User Guide.</p>"
          }
        },
        "AutoMinorVersionUpgrade": {

@@ -2772,13 +2772,13 @@
        "StorageEncrypted": {
          "target": "com.amazonaws.rds#BooleanOptional",
          "traits": {
-            "smithy.api#documentation": "<p>A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.</p> <p>For RDS Custom Oracle instances, either set this parameter to true or leave it unset. If you set this parameter to false, RDS reports an error.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The encryption for DB instances is managed by the DB cluster.</p>"
+            "smithy.api#documentation": "<p>A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.</p> <p>For RDS Custom instances, either set this parameter to true or leave it unset. If you set this parameter to false, RDS reports an error.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The encryption for DB instances is managed by the DB cluster.</p>"
          }
        },
        "KmsKeyId": {
          "target": "com.amazonaws.rds#String",
          "traits": {

-            "smithy.api#documentation": "<p>The Amazon Web Services KMS key identifier for an encrypted DB instance.</p> <p>The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster.</p> <p>If <code>StorageEncrypted</code> is enabled, and you do not specify a value for the <code>KmsKeyId</code> parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.</p> <p><b>Amazon RDS Custom</b></p> <p>A KMS key is required for RDS Custom Oracle instances. For most RDS engines, if you leave this parameter empty while enabling <code>StorageEncrypted</code>, the engine uses the default KMS key. However, RDS Custom for Oracle doesn't use the default key when this parameter is empty. You must explicitly specify a key.</p>"
+            "smithy.api#documentation": "<p>The Amazon Web Services KMS key identifier for an encrypted DB instance.</p> <p>The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The Amazon Web Services KMS key identifier is managed by the DB cluster. For more information, see CreateDBCluster.</p> <p>If <code>StorageEncrypted</code> is enabled, and you do not specify a value for the <code>KmsKeyId</code> parameter, then Amazon RDS uses your default KMS key. There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.</p> <p><b>Amazon RDS Custom</b></p> <p>A KMS key is required for RDS Custom instances. For most RDS engines, if you leave this parameter empty while enabling <code>StorageEncrypted</code>, the engine uses the default KMS key. However, RDS Custom doesn't use the default key when this parameter is empty. You must explicitly specify a key.</p>"
          }
        },
        "Domain": {

@@ -3216,7 +3216,7 @@
        "DBParameterGroupFamily": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.</p> <p>To list all of the available parameter group families for a DB engine, use the following command:</p> <p><code>aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine</code></p> <p>For example, to list all of the available parameter group families for the MySQL DB engine, use the following command:</p> <p><code>aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql</code></p> <p>The output contains duplicates.</p> <p>The following are the valid DB engine values:</p> <ul> <li><code>aurora</code> (for MySQL 5.6-compatible Aurora)</li> <li><code>aurora-mysql</code> (for MySQL 5.7-compatible Aurora)</li> <li><code>aurora-postgresql</code></li> <li><code>mariadb</code></li> <li><code>mysql</code></li> <li><code>oracle-ee</code></li> <li><code>oracle-ee-cdb</code></li> <li><code>oracle-se2</code></li> <li><code>oracle-se2-cdb</code></li> <li><code>postgres</code></li> <li><code>sqlserver-ee</code></li> <li><code>sqlserver-se</code></li> <li><code>sqlserver-ex</code></li> <li><code>sqlserver-web</code></li> </ul>",
+            "smithy.api#documentation": "<p>The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.</p> <p>To list all of the available parameter group families for a DB engine, use the following command:</p> <p><code>aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine</code></p> <p>For example, to list all of the available parameter group families for the MySQL DB engine, use the following command:</p> <p><code>aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql</code></p> <p>The output contains duplicates.</p> <p>The following are the valid DB engine values:</p> <ul> <li><code>aurora</code> (for MySQL 5.6-compatible Aurora)</li> <li><code>aurora-mysql</code> (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)</li> <li><code>aurora-postgresql</code></li> <li><code>mariadb</code></li> <li><code>mysql</code></li> <li><code>oracle-ee</code></li> <li><code>oracle-ee-cdb</code></li> <li><code>oracle-se2</code></li> <li><code>oracle-se2-cdb</code></li> <li><code>postgres</code></li> <li><code>sqlserver-ee</code></li> <li><code>sqlserver-se</code></li> <li><code>sqlserver-ex</code></li> <li><code>sqlserver-web</code></li> </ul>",
            "smithy.api#required": {}
          }
        },

@@ -4421,7 +4421,7 @@
        "StorageType": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The storage type associated with DB instance.</p> <p>This setting is only for non-Aurora Multi-AZ DB clusters.</p>"
+            "smithy.api#documentation": "<p>The storage type associated with the DB cluster.</p> <p>This setting is only for non-Aurora Multi-AZ DB clusters.</p>"
          }
        },
        "Iops": {

@@ -5153,7 +5153,7 @@
        "Status": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>Specifies the status of this DB cluster snapshot.</p>"
+            "smithy.api#documentation": "<p>Specifies the status of this DB cluster snapshot. Valid statuses are the following:</p> <ul> <li><code>available</code></li> <li><code>copying</code></li> <li><code>creating</code></li> </ul>"
          }
        },
        "Port": {

@@ -9367,7 +9367,7 @@
        "Engine": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The database engine to return.</p> <p>Valid Values:</p> <ul> <li><code>aurora</code> (for MySQL 5.6-compatible Aurora)</li> <li><code>aurora-mysql</code> (for MySQL 5.7-compatible Aurora)</li> <li><code>aurora-postgresql</code></li> <li><code>mariadb</code></li> <li><code>mysql</code></li> <li><code>oracle-ee</code></li> <li><code>oracle-ee-cdb</code></li> <li><code>oracle-se2</code></li> <li><code>oracle-se2-cdb</code></li> <li><code>postgres</code></li> <li><code>sqlserver-ee</code></li> <li><code>sqlserver-se</code></li> <li><code>sqlserver-ex</code></li> <li><code>sqlserver-web</code></li> </ul>"
+            "smithy.api#documentation": "<p>The database engine to return.</p> <p>Valid Values:</p> <ul> <li><code>aurora</code> (for MySQL 5.6-compatible Aurora)</li> <li><code>aurora-mysql</code> (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)</li> <li><code>aurora-postgresql</code></li> <li><code>mariadb</code></li> <li><code>mysql</code></li> <li><code>oracle-ee</code></li> <li><code>oracle-ee-cdb</code></li> <li><code>oracle-se2</code></li> <li><code>oracle-se2-cdb</code></li> <li><code>postgres</code></li> <li><code>sqlserver-ee</code></li> <li><code>sqlserver-se</code></li> <li><code>sqlserver-ex</code></li> <li><code>sqlserver-web</code></li> </ul>"
          }
        },
        "EngineVersion": {

@@ -11187,7 +11187,7 @@
        "Engine": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The name of the engine to retrieve DB instance options for.</p> <p>Valid Values:</p> <ul> <li><code>aurora</code> (for MySQL 5.6-compatible Aurora)</li> <li><code>aurora-mysql</code> (for MySQL 5.7-compatible Aurora)</li> <li><code>aurora-postgresql</code></li> <li><code>mariadb</code></li> <li><code>mysql</code></li> <li><code>oracle-ee</code></li> <li><code>oracle-ee-cdb</code></li> <li><code>oracle-se2</code></li> <li><code>oracle-se2-cdb</code></li> <li><code>postgres</code></li> <li><code>sqlserver-ee</code></li> <li><code>sqlserver-se</code></li> <li><code>sqlserver-ex</code></li> <li><code>sqlserver-web</code></li> </ul>",
+            "smithy.api#documentation": "<p>The name of the engine to retrieve DB instance options for.</p> <p>Valid Values:</p> <ul> <li><code>aurora</code> (for MySQL 5.6-compatible Aurora)</li> <li><code>aurora-mysql</code> (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora)</li> <li><code>aurora-postgresql</code></li> <li><code>mariadb</code></li> <li><code>mysql</code></li> <li><code>oracle-ee</code></li> <li><code>oracle-ee-cdb</code></li> <li><code>oracle-se2</code></li> <li><code>oracle-se2-cdb</code></li> <li><code>postgres</code></li> <li><code>sqlserver-ee</code></li> <li><code>sqlserver-se</code></li> <li><code>sqlserver-ex</code></li> <li><code>sqlserver-web</code></li> </ul>",
            "smithy.api#required": {}
          }
        },

@@ -13732,7 +13732,7 @@
        "EngineVersion": {
          "target": "com.amazonaws.rds#CustomEngineVersion",
          "traits": {
-            "smithy.api#documentation": "<p>The custom engine version (CEV) that you want to modify. This option is required for RDS Custom, but optional for Amazon RDS. The combination of <code>Engine</code> and <code>EngineVersion</code> is unique per customer per Amazon Web Services Region.</p>",
+            "smithy.api#documentation": "<p>The custom engine version (CEV) that you want to modify. This option is required for RDS Custom for Oracle, but optional for Amazon RDS. The combination of <code>Engine</code> and <code>EngineVersion</code> is unique per customer per Amazon Web Services Region.</p>",
            "smithy.api#required": {}
          }
        },

@@ -13950,7 +13950,7 @@
        "EngineVersion": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless <code>ApplyImmediately</code> is enabled.</p> <p>To list all of the available engine versions for MySQL 5.6-compatible Aurora, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>To list all of the available engine versions for MySQL 5.7-compatible Aurora, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>To list all of the available engine versions for Aurora PostgreSQL, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>To list all of the available engine versions for RDS for MySQL, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>To list all of the available engine versions for RDS for PostgreSQL, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>Valid for: Aurora DB clusters and Multi-AZ DB clusters</p>"
+            "smithy.api#documentation": "<p>The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless <code>ApplyImmediately</code> is enabled.</p> <p>To list all of the available engine versions for MySQL 5.6-compatible Aurora, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>To list all of the available engine versions for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>To list all of the available engine versions for Aurora PostgreSQL, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>To list all of the available engine versions for RDS for MySQL, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>To list all of the available engine versions for RDS for PostgreSQL, use the following command:</p> <p><code>aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"</code></p> <p>Valid for: Aurora DB clusters and Multi-AZ DB clusters</p>"
          }
        },
        "AllowMajorVersionUpgrade": {

@@ -14022,7 +14022,7 @@
        "StorageType": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>Specifies the storage type to be associated with the DB cluster.</p> <p>Valid values: <code>standard | gp2 | io1</code></p> <p>If you specify <code>io1</code>, you must also include a value for the <code>Iops</code> parameter.</p> <p>Default: <code>io1</code> if the <code>Iops</code> parameter is specified, otherwise <code>gp2</code></p> <p>Valid for: Multi-AZ DB clusters only</p>"
+            "smithy.api#documentation": "<p>Specifies the storage type to be associated with the DB cluster.</p> <p>Valid values: <code>io1</code></p> <p>When specified, a value for the <code>Iops</code> parameter is required.</p> <p>Default: <code>io1</code></p> <p>Valid for: Multi-AZ DB clusters only</p>"
          }
        },
        "Iops": {

@@ -14277,7 +14277,7 @@
        "DBInstanceClass": {
          "target": "com.amazonaws.rds#String",
          "traits": {
-            "smithy.api#documentation": "<p>The new compute and memory capacity of the DB instance, for example <code>db.m4.large</code>. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.</p> <p>If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless <code>ApplyImmediately</code> is enabled for this request.</p> <p>This setting doesn't apply to RDS Custom.</p> <p>Default: Uses existing setting</p>"
+            "smithy.api#documentation": "<p>The new compute and memory capacity of the DB instance, for example <code>db.m4.large</code>. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.</p> <p>If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless <code>ApplyImmediately</code> is enabled for this request.</p> <p>This setting doesn't apply to RDS Custom for Oracle.</p> <p>Default: Uses existing setting</p>"
          }
        },
        "DBSubnetGroupName": {

@@ -14295,13 +14295,13 @@
        "VpcSecurityGroupIds": {
          "target": "com.amazonaws.rds#VpcSecurityGroupIdList",
          "traits": {
-            "smithy.api#documentation": "<p>A list of Amazon EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.</p> <p>This setting doesn't apply to RDS Custom.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see ModifyDBCluster.</p> <p>Constraints:</p> <ul> <li>If supplied, must match existing VpcSecurityGroupIds.</li> </ul>"
+            "smithy.api#documentation": "<p>A list of Amazon EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.</p> <p>This setting doesn't apply to RDS Custom.</p> <p><b>Amazon Aurora</b></p> <p>Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see ModifyDBCluster.</p> <p>Constraints:</p> <ul> <li>If supplied, must match existing VpcSecurityGroupIds.</li> </ul>"
          }
        },
        "ApplyImmediately": {
          "target": "com.amazonaws.rds#Boolean",
          "traits": {

-            "smithy.api#documentation": "<p>A value that indicates whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the <code>PreferredMaintenanceWindow</code> setting for the DB instance. By default, this parameter is disabled.</p> <p>If this parameter is disabled, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance in the Amazon RDS User Guide. to see the impact of enabling or disabling <code>ApplyImmediately</code> for each modified parameter and to determine when the changes are applied.</p>"
+            "smithy.api#documentation": "<p>A value that indicates whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the <code>PreferredMaintenanceWindow</code> setting for the DB instance. By default, this parameter is disabled.</p> <p>If this parameter is disabled, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to RebootDBInstance, or the next failure reboot. Review the table of parameters in Modifying a DB Instance in the Amazon RDS User Guide to see the impact of enabling or disabling <code>ApplyImmediately</code> for each modified parameter and to determine when the changes are applied.</p>"
          }
        },
        "MasterUserPassword": {

              The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

              \n \n

              Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.

              \n
              \n

              These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled\n for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously\n applied as soon as possible.

              \n

              \n Amazon Aurora\n

              \n

              Not applicable. The retention period for automated backups is managed by the DB\n cluster. For more information, see ModifyDBCluster.

              \n

              Default: Uses existing setting

              \n

              Constraints:

              \n
                \n
              • \n

                It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to \n read replicas. It can't be set to 0 or 35 for an RDS Custom DB instance.

                \n
              • \n
              • \n

                It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or\n later.

                \n
              • \n
              • \n

                It can be specified for a PostgreSQL read replica only if the source is running PostgreSQL\n 9.3.5.

                \n
              • \n
              " + "smithy.api#documentation": "

              The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.

              \n \n

              Enabling and disabling backups can result in a brief I/O suspension that lasts from a few seconds to a few minutes, depending on the size and class of your DB instance.

              \n
              \n

              These changes are applied during the next maintenance window unless the ApplyImmediately parameter is enabled\n for this request. If you change the parameter from one non-zero value to another non-zero value, the change is asynchronously\n applied as soon as possible.

              \n

              \n Amazon Aurora\n

              \n

              Not applicable. The retention period for automated backups is managed by the DB\n cluster. For more information, see ModifyDBCluster.

              \n

              Default: Uses existing setting

              \n

              Constraints:

              \n
                \n
              • \n

                It must be a value from 0 to 35. It can't be set to 0 if the DB instance is a source to \n read replicas. It can't be set to 0 or 35 for an RDS Custom for Oracle DB instance.

                \n
              • \n
              • \n

                It can be specified for a MySQL read replica only if the source is running MySQL 5.6 or\n later.

                \n
              • \n
              • \n

                It can be specified for a PostgreSQL read replica only if the source is running PostgreSQL\n 9.3.5.

                \n
              • \n
              " } }, "PreferredBackupWindow": { @@ -14343,7 +14343,7 @@ "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              \n The version number of the database engine to upgrade to. \n Changing this parameter results in an outage and the change \n is applied during the next maintenance window\n unless the ApplyImmediately parameter is enabled for this request.\n

              \n

              For major version upgrades, if a nondefault DB parameter group is currently in use, a\n new DB parameter group in the DB parameter group family for the new engine version must\n be specified. The new DB parameter group can be the default for that DB parameter group\n family.

              \n

              If you specify only a major version, Amazon RDS will update the DB instance to the \n default minor version if the current minor version is lower.\n For information about valid engine versions, see CreateDBInstance, \n or call DescribeDBEngineVersions.

              \n

              In RDS Custom, this parameter is supported for read replicas only if they are in the \n PATCH_DB_FAILURE lifecycle.\n

              " + "smithy.api#documentation": "

              \n The version number of the database engine to upgrade to. \n Changing this parameter results in an outage and the change \n is applied during the next maintenance window\n unless the ApplyImmediately parameter is enabled for this request.\n

              \n

              For major version upgrades, if a nondefault DB parameter group is currently in use, a\n new DB parameter group in the DB parameter group family for the new engine version must\n be specified. The new DB parameter group can be the default for that DB parameter group\n family.

              \n

              If you specify only a major version, Amazon RDS will update the DB instance to the \n default minor version if the current minor version is lower.\n For information about valid engine versions, see CreateDBInstance, \n or call DescribeDBEngineVersions.

              \n

              In RDS Custom for Oracle, this parameter is supported for read replicas only if they are in the \n PATCH_DB_FAILURE lifecycle.\n

              " } }, "AllowMajorVersionUpgrade": { @@ -14433,7 +14433,7 @@ "PubliclyAccessible": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "

              A value that indicates whether the DB instance is publicly accessible.\n

              \n

              When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint\n resolves to the private IP address from within the DB cluster's virtual private cloud\n (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access\n to the DB cluster is ultimately controlled by the security group it uses. That public\n access isn't permitted if the security group assigned to the DB cluster doesn't permit\n it.

              \n

              When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

              \n

              \n PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a \n public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.\n

              \n

              Changes to the PubliclyAccessible parameter are applied immediately regardless\n of the value of the ApplyImmediately parameter.

              \n

              This setting doesn't apply to RDS Custom.

              " + "smithy.api#documentation": "

              A value that indicates whether the DB instance is publicly accessible.\n

              \n

              When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint\n resolves to the private IP address from within the DB cluster's virtual private cloud\n (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access\n to the DB cluster is ultimately controlled by the security group it uses. That public\n access isn't permitted if the security group assigned to the DB cluster doesn't permit\n it.

              \n

              When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

              \n

              \n PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be part of a \n public subnet and PubliclyAccessible must be enabled for it to be publicly accessible.\n

              \n

              Changes to the PubliclyAccessible parameter are applied immediately regardless\n of the value of the ApplyImmediately parameter.

              " } }, "MonitoringRoleArn": { @@ -14590,7 +14590,7 @@ "Parameters": { "target": "com.amazonaws.rds#ParametersList", "traits": { - "smithy.api#documentation": "

              An array of parameter names, values, and the application methods for the parameter update. At least one parameter name, value, and \n application method method must be supplied; later arguments are optional. A maximum of 20 parameters can be modified in a single request.

              \n

              Valid Values (for the application method): immediate | pending-reboot\n

              \n \n

              You can use the immediate value with dynamic parameters only. You can use the \n pending-reboot value for both dynamic and static parameters.

              \n

              When the application method is immediate, changes to dynamic parameters are applied immediately \n to the DB instances associated with the parameter group. When the application method is pending-reboot, \n changes to dynamic and static parameters are applied after a reboot without failover to the DB instances associated with the \n parameter group.

              \n
              ", + "smithy.api#documentation": "

              An array of parameter names, values, and the application methods for the parameter update. At least one parameter name, value, and \n application method must be supplied; later arguments are optional. A maximum of 20 parameters can be modified in a single request.

              \n

              Valid Values (for the application method): immediate | pending-reboot\n

              \n

              You can use the immediate value with dynamic parameters only. You can use the pending-reboot value for both dynamic \n and static parameters.

              \n

              When the application method is immediate, changes to dynamic parameters are applied immediately to the DB instances associated with \n the parameter group.

              \n

              When the application method is pending-reboot, changes to dynamic and static parameters are applied after a reboot without failover \n to the DB instances associated with the parameter group.

              \n \n

              You can't use pending-reboot with dynamic parameters on RDS for SQL Server DB instances. Use immediate.

              \n
              \n

              For more information on modifying DB parameters, see Working \n with DB parameter groups in the Amazon RDS User Guide.

              ", "smithy.api#required": {} } } @@ -15121,7 +15121,7 @@ "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              The version number of the database engine to which you want to upgrade. \n Changing this parameter results in an outage. The change is applied during\n the next maintenance window unless ApplyImmediately is enabled.

              \n

              To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'\n

              \n

              To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'\n

              \n

              To list all of the available engine versions for aurora-postgresql, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'\n

              " + "smithy.api#documentation": "

              The version number of the database engine to which you want to upgrade. \n Changing this parameter results in an outage. The change is applied during\n the next maintenance window unless ApplyImmediately is enabled.

              \n

              To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'\n

              \n

              To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-mysql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'\n

              \n

              To list all of the available engine versions for aurora-postgresql, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-postgresql --query '*[]|[?SupportsGlobalDatabases == `true`].[EngineVersion]'\n

              " } }, "AllowMajorVersionUpgrade": { @@ -17630,14 +17630,14 @@ "Engine": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              The name of the database engine to be used for this DB cluster.

              \n

              Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible Aurora), and aurora-postgresql \n

              ", + "smithy.api#documentation": "

              The name of the database engine to be used for this DB cluster.

              \n

              Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), and aurora-postgresql \n

              ", "smithy.api#required": {} } }, "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              The version number of the database engine to use.

              \n

              To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible Aurora), use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for aurora-postgresql, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              \n Aurora MySQL\n

              \n

              Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5\n

              \n

              \n Aurora PostgreSQL\n

              \n

              Example: 9.6.3, 10.7\n

              " + "smithy.api#documentation": "

              The version number of the database engine to use.

              \n

              To list all of the available engine versions for aurora (for MySQL 5.6-compatible Aurora), use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for aurora-mysql (for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora), use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for aurora-postgresql, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              \n Aurora MySQL\n

              \n

              Example: 5.6.10a, 5.6.mysql_aurora.1.19.2, 5.7.12, 5.7.mysql_aurora.2.04.5, 8.0.mysql_aurora.3.01.0\n

              \n

              \n Aurora PostgreSQL\n

              \n

              Example: 9.6.3, 10.7\n

              " } }, "Port": { @@ -17877,7 +17877,7 @@ "EngineVersion": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              The version of the database engine to use for the new DB cluster.

              \n

              To list all of the available engine versions for MySQL 5.6-compatible Aurora, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for MySQL 5.7-compatible Aurora, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for Aurora PostgreSQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for RDS for MySQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for RDS for PostgreSQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              \n Aurora MySQL\n

              \n

              See MySQL on Amazon RDS Versions in the \n Amazon Aurora User Guide.\n

              \n

              \n Aurora PostgreSQL\n

              \n

              See Amazon Aurora PostgreSQL releases and engine versions in the \n Amazon Aurora User Guide.\n

              \n

              \n MySQL\n

              \n

              See MySQL on Amazon RDS Versions in the \n Amazon RDS User Guide.\n

              \n

              \n PostgreSQL\n

              \n

              See Amazon RDS for PostgreSQL versions and extensions in the \n Amazon RDS User Guide.\n

              \n

              Valid for: Aurora DB clusters and Multi-AZ DB clusters

              " + "smithy.api#documentation": "

              The version of the database engine to use for the new DB cluster.

              \n

              To list all of the available engine versions for MySQL 5.6-compatible Aurora, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for Aurora PostgreSQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for RDS for MySQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              To list all of the available engine versions for RDS for PostgreSQL, use the following command:

              \n

              \n aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"\n

              \n

              \n Aurora MySQL\n

              \n

              See MySQL on Amazon RDS Versions in the \n Amazon Aurora User Guide.\n

              \n

              \n Aurora PostgreSQL\n

              \n

              See Amazon Aurora PostgreSQL releases and engine versions in the \n Amazon Aurora User Guide.\n

              \n

              \n MySQL\n

              \n

              See MySQL on Amazon RDS Versions in the \n Amazon RDS User Guide.\n

              \n

              \n PostgreSQL\n

              \n

              See Amazon RDS for PostgreSQL versions and extensions in the \n Amazon RDS User Guide.\n

              \n

              Valid for: Aurora DB clusters and Multi-AZ DB clusters

              " } }, "Port": { @@ -17991,7 +17991,7 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              Specifies the storage type to be associated with the each DB instance in the Multi-AZ DB cluster.

              \n

              \n Valid values: standard | gp2 | io1\n

              \n

              \n If you specify io1, you must also include a value for the\n Iops parameter.\n

              \n

              \n Default: io1 if the Iops parameter\n is specified, otherwise gp2\n

              \n

              Valid for: Aurora DB clusters and Multi-AZ DB clusters

              " + "smithy.api#documentation": "

              Specifies the storage type to be associated with each DB instance in the Multi-AZ DB cluster.

              \n

              \n Valid values: io1\n

              \n

              \n When specified, a value for the Iops parameter is required.\n

              \n

              \n Default: io1\n

              \n

              Valid for: Aurora DB clusters and Multi-AZ DB clusters

              " } }, "Iops": { @@ -18224,7 +18224,7 @@ "StorageType": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "

              Specifies the storage type to be associated with the each DB instance in the Multi-AZ DB cluster.

              \n

              \n Valid values: standard | gp2 | io1\n

              \n

              \n If you specify io1, also include a value for the\n Iops parameter.\n

              \n

              \n Default: io1 if the Iops parameter\n is specified, otherwise gp2\n

              \n

              Valid for: Multi-AZ DB clusters only

              " + "smithy.api#documentation": "

              Specifies the storage type to be associated with each DB instance in the Multi-AZ DB cluster.

              \n

              \n Valid values: io1\n

              \n

              \n When specified, a value for the Iops parameter is required.\n

              \n

              \n Default: io1\n

              \n

              Valid for: Multi-AZ DB clusters only

              " } }, "PubliclyAccessible": { diff --git a/codegen/sdk-codegen/aws-models/rekognition.json b/codegen/sdk-codegen/aws-models/rekognition.json index 23c6d1050fc1..80c656494a0a 100644 --- a/codegen/sdk-codegen/aws-models/rekognition.json +++ b/codegen/sdk-codegen/aws-models/rekognition.json @@ -805,7 +805,7 @@ "FaceModelVersion": { "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "

              Version number of the face detection model associated with the collection you are creating.

              " + "smithy.api#documentation": "

              Latest face model being used with the collection. For more information, see Model versioning.

              " } } } @@ -3128,6 +3128,12 @@ "traits": { "smithy.api#documentation": "

              Confidence level that the bounding box contains a face (and not a different object such\n as a tree).

              " } + }, + "IndexFacesModelVersion": { + "target": "com.amazonaws.rekognition#IndexFacesModelVersion", + "traits": { + "smithy.api#documentation": "

              \n The version of the face detect and storage model that was used when indexing the face vector.\n

              " + } } }, "traits": { @@ -4687,7 +4693,13 @@ } ], "traits": { - "smithy.api#documentation": "

              Detects faces in the input image and adds them to the specified collection.

              \n

              Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying\n detection algorithm first detects the faces in the input image. For each face, the algorithm\n extracts facial features into a feature vector, and stores it in the backend database.\n Amazon Rekognition uses feature vectors when it performs face match and search operations using the\n SearchFaces and SearchFacesByImage\n operations.

              \n \n

              For more information, see Adding Faces to a Collection in the Amazon Rekognition\n Developer Guide.

              \n

              To get the number of faces in a collection, call DescribeCollection.

              \n\n

              If you're using version 1.0 of the face detection model, IndexFaces\n indexes the 15 largest faces in the input image. Later versions of the face detection model\n index the 100 largest faces in the input image.

              \n

              If you're using version 4 or later of the face model, image orientation information\n is not returned in the OrientationCorrection field.

              \n

              To determine which version of the model you're using, call DescribeCollection\n and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response\n from IndexFaces\n

              \n \n

              For more information, see Model Versioning in the Amazon Rekognition Developer\n Guide.

              \n

              If you provide the optional ExternalImageId for the input image you\n provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this\n external image ID to create a client-side index to associate the faces with each image. You\n can then use the index to find all faces in an image.

              \n

              You can specify the maximum number of faces to index with the MaxFaces input\n parameter. This is useful when you want to index the largest faces in an image and don't want to index\n smaller faces, such as those belonging to people standing in the background.

              \n

              The QualityFilter input parameter allows you to filter out detected faces\n that don’t meet a required quality bar. The quality bar is based on a\n variety of common use cases. By default, IndexFaces chooses the quality bar that's \n used to filter faces. You can also explicitly choose\n the quality bar. Use QualityFilter, to set the quality bar\n by specifying LOW, MEDIUM, or HIGH.\n If you do not want to filter detected faces, specify NONE.

              \n \n

              To use quality filtering, you need a collection associated with version 3 of the \n face model or higher. To get the version of the face model associated with a collection, call \n DescribeCollection.

              \n
              \n

              Information about faces detected in an image, but not indexed, is returned in an array of\n UnindexedFace objects, UnindexedFaces. Faces aren't\n indexed for reasons such as:

              \n
                \n
              • \n

                The number of faces detected exceeds the value of the MaxFaces request\n parameter.

                \n
              • \n
              • \n

                The face is too small compared to the image dimensions.

                \n
              • \n
              • \n

                The face is too blurry.

                \n
              • \n
              • \n

                The image is too dark.

                \n
              • \n
              • \n

                The face has an extreme pose.

                \n
              • \n
              • \n

                The face doesn’t have enough detail to be suitable for face search.

                \n
              • \n
              \n

              In response, the IndexFaces operation returns an array of metadata for \n all detected faces, FaceRecords. This includes:

              \n
                \n
              • \n

                The bounding box, BoundingBox, of the detected face.

                \n
              • \n
              • \n

                A confidence value, Confidence, which indicates the confidence that the\n bounding box contains a face.

                \n
              • \n
              • \n

                A face ID, FaceId, assigned by the service for each face that's detected\n and stored.

                \n
              • \n
              • \n

                An image ID, ImageId, assigned by the service for the input image.

                \n
              • \n
              \n

              If you request all facial attributes (by using the detectionAttributes\n parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for\n example, location of eye and mouth) and other facial attributes. If you provide\n the same image, specify the same collection, and use the same external ID in the\n IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

              \n \n\n

              \n \n\n

              The input image is passed either as base64-encoded image bytes, or as a reference to an\n image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations,\n passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

              \n

              This operation requires permissions to perform the rekognition:IndexFaces\n action.

              " + "smithy.api#documentation": "

              Detects faces in the input image and adds them to the specified collection.

              \n

              Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying\n detection algorithm first detects the faces in the input image. For each face, the algorithm\n extracts facial features into a feature vector, and stores it in the backend database.\n Amazon Rekognition uses feature vectors when it performs face match and search operations using the\n SearchFaces and SearchFacesByImage\n operations.

              \n \n

              For more information, see Adding Faces to a Collection in the Amazon Rekognition\n Developer Guide.

              \n

              To get the number of faces in a collection, call DescribeCollection.

              \n\n

              If you're using version 1.0 of the face detection model, IndexFaces\n indexes the 15 largest faces in the input image. Later versions of the face detection model\n index the 100 largest faces in the input image.

              \n

              If you're using version 4 or later of the face model, image orientation information\n is not returned in the OrientationCorrection field.

              \n

              To determine which version of the model you're using, call DescribeCollection\n and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response\n from IndexFaces\n

              \n \n

              For more information, see Model Versioning in the Amazon Rekognition Developer\n Guide.

              \n

              If you provide the optional ExternalImageId for the input image you\n provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this\n external image ID to create a client-side index to associate the faces with each image. You\n can then use the index to find all faces in an image.

              \n

              You can specify the maximum number of faces to index with the MaxFaces input\n parameter. This is useful when you want to index the largest faces in an image and don't want to index\n smaller faces, such as those belonging to people standing in the background.

              \n

              The QualityFilter input parameter allows you to filter out detected faces\n that don’t meet a required quality bar. The quality bar is based on a\n variety of common use cases. By default, IndexFaces chooses the quality bar that's \n used to filter faces. You can also explicitly choose\n the quality bar. Use QualityFilter, to set the quality bar\n by specifying LOW, MEDIUM, or HIGH.\n If you do not want to filter detected faces, specify NONE.

              \n \n

              To use quality filtering, you need a collection associated with version 3 of the \n face model or higher. To get the version of the face model associated with a collection, call \n DescribeCollection.

              \n
              \n

              Information about faces detected in an image, but not indexed, is returned in an array of\n UnindexedFace objects, UnindexedFaces. Faces aren't\n indexed for reasons such as:

              \n
                \n
              • \n

                The number of faces detected exceeds the value of the MaxFaces request\n parameter.

                \n
              • \n
              • \n

                The face is too small compared to the image dimensions.

                \n
              • \n
              • \n

                The face is too blurry.

                \n
              • \n
              • \n

                The image is too dark.

                \n
              • \n
              • \n

                The face has an extreme pose.

                \n
              • \n
              • \n

                The face doesn’t have enough detail to be suitable for face search.

                \n
              • \n
              \n

              In response, the IndexFaces operation returns an array of metadata for \n all detected faces, FaceRecords. This includes:

              \n
                \n
              • \n

                The bounding box, BoundingBox, of the detected face.

                \n
              • \n
              • \n

                A confidence value, Confidence, which indicates the confidence that the\n bounding box contains a face.

                \n
              • \n
              • \n

                A face ID, FaceId, assigned by the service for each face that's detected\n and stored.

                \n
              • \n
              • \n

                An image ID, ImageId, assigned by the service for the input image.

                \n
              • \n
              \n

              If you request all facial attributes (by using the detectionAttributes\n parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for\n example, location of eye and mouth) and other facial attributes. If you provide\n the same image, specify the same collection, use the same external ID, and use the same model version in the\n IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata.

              \n \n\n

              \n \n\n

              The input image is passed either as base64-encoded image bytes, or as a reference to an\n image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations,\n passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file.

              \n

              This operation requires permissions to perform the rekognition:IndexFaces\n action.

              " + } + }, + "com.amazonaws.rekognition#IndexFacesModelVersion": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9\\.]+$" } }, "com.amazonaws.rekognition#IndexFacesRequest": { @@ -4751,7 +4763,7 @@ "FaceModelVersion": { "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "

              The version number of the face detection model that's associated with the input\n collection (CollectionId).

              " + "smithy.api#documentation": "

              Latest face model being used with the collection. For more information, see Model versioning.

              " } }, "UnindexedFaces": { @@ -5351,7 +5363,7 @@ "FaceModelVersions": { "target": "com.amazonaws.rekognition#FaceModelVersionList", "traits": { - "smithy.api#documentation": "

              Version numbers of the face detection models associated with the collections in the array CollectionIds.\n For example, the value of FaceModelVersions[2] is the version number for the face detection model used\n by the collection in CollectionId[2].

              " + "smithy.api#documentation": "

              Latest face models being used with the corresponding collections in the array. For more information, see Model versioning. \n For example, the value of FaceModelVersions[2] is the version number for the face detection model used\n by the collection in CollectionId[2].

              " } } } @@ -5659,7 +5671,7 @@ "FaceModelVersion": { "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "

              Version number of the face detection model associated with the input collection (CollectionId).

              " + "smithy.api#documentation": "

              Latest face model being used with the collection. For more information, see Model versioning.

              " } } } @@ -7276,7 +7288,7 @@ "FaceModelVersion": { "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "

              Version number of the face detection model associated with the input collection (CollectionId).

              " + "smithy.api#documentation": "

              Latest face model being used with the collection. For more information, see Model versioning.

              " } } } @@ -7330,7 +7342,7 @@ "FaceModelVersion": { "target": "com.amazonaws.rekognition#String", "traits": { - "smithy.api#documentation": "

              Version number of the face detection model associated with the input collection (CollectionId).

              " + "smithy.api#documentation": "

              Latest face model being used with the collection. For more information, see Model versioning.

              " } } } diff --git a/codegen/sdk-codegen/aws-models/s3-control.json b/codegen/sdk-codegen/aws-models/s3-control.json index a573bbceb618..6226ca45d911 100644 --- a/codegen/sdk-codegen/aws-models/s3-control.json +++ b/codegen/sdk-codegen/aws-models/s3-control.json @@ -6186,7 +6186,7 @@ "target": "com.amazonaws.s3control#S3ExpirationInDays", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

              This argument specifies how long the S3 Glacier or S3 Glacier Deep Archive object remains available in Amazon S3.\n S3 Initiate Restore Object jobs that\n target S3 Glacier and S3 Glacier Deep Archive objects require ExpirationInDays set to 1\n or greater.

              \n

              Conversely, do not set ExpirationInDays when\n creating S3 Initiate Restore Object jobs that target\n S3 Intelligent-Tiering Archive Access and Deep Archive Access tier objects. Objects in\n S3 Intelligent-Tiering archive access tiers are not subject to restore expiry, so\n specifying ExpirationInDays results in restore request failure.

              \n

              S3 Batch Operations jobs can operate either on S3 Glacier and S3 Glacier Deep Archive storage class\n objects or on S3 Intelligent-Tiering Archive Access and Deep Archive Access storage tier\n objects, but not both types in the same job. If you need to restore objects of both types\n you must create separate Batch Operations jobs.

              " + "smithy.api#documentation": "

              This argument specifies how long the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive object remains available in Amazon S3.\n S3 Initiate Restore Object jobs that\n target S3 Glacier Flexible Retrieval and S3 Glacier Deep Archive objects require ExpirationInDays set to 1\n or greater.

              \n

              Conversely, do not set ExpirationInDays when\n creating S3 Initiate Restore Object jobs that target\n S3 Intelligent-Tiering Archive Access and Deep Archive Access tier objects. Objects in\n S3 Intelligent-Tiering archive access tiers are not subject to restore expiry, so\n specifying ExpirationInDays results in restore request failure.

              \n

              S3 Batch Operations jobs can operate either on S3 Glacier Flexible Retrieval and S3 Glacier Deep Archive storage class\n objects or on S3 Intelligent-Tiering Archive Access and Deep Archive Access storage tier\n objects, but not both types in the same job. If you need to restore objects of both types\n you must create separate Batch Operations jobs.

              " } }, "GlacierJobTier": { diff --git a/codegen/sdk-codegen/aws-models/s3.json b/codegen/sdk-codegen/aws-models/s3.json index 52e45df95ac2..923485f488a8 100644 --- a/codegen/sdk-codegen/aws-models/s3.json +++ b/codegen/sdk-codegen/aws-models/s3.json @@ -1877,7 +1877,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a\n valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to\n create buckets. By creating the bucket, you become the bucket owner.

              \n

              Not every string is an acceptable bucket name. For information about bucket naming\n restrictions, see Bucket naming rules.

              \n

              If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.

              \n

              By default, the bucket is created in the US East (N. Virginia) Region. You can\n optionally specify a Region in the request body. You might choose a Region to optimize\n latency, minimize costs, or address regulatory requirements. For example, if you reside in\n Europe, you will probably find it advantageous to create buckets in the Europe (Ireland)\n Region. For more information, see Accessing a\n bucket.

              \n \n

              If you send your create bucket request to the s3.amazonaws.com endpoint,\n the request goes to the us-east-1 Region. Accordingly, the signature calculations in\n Signature Version 4 must use us-east-1 as the Region, even if the location constraint in\n the request specifies another Region where the bucket is to be created. If you create a\n bucket in a Region other than US East (N. Virginia), your application must be able to\n handle 307 redirect. For more information, see Virtual hosting of buckets.

              \n
              \n

              \n Access control lists (ACLs)\n

              \n

              When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or\n groups that should be granted specific permissions on the bucket.

              \n \n

              If your CreateBucket request includes the BucketOwnerEnforced value for\n the x-amz-object-ownership header, your request can either not specify\n an ACL or specify bucket owner full control ACLs, such as the bucket-owner-full-control\n canned ACL or an equivalent ACL expressed in the XML format. For\n more information, see Controlling object\n ownership in the Amazon S3 User Guide.

              \n
              \n

              There are two ways to grant the appropriate permissions using the request headers.

              \n
                \n
              • \n

                Specify a canned ACL using the x-amz-acl request header. Amazon S3\n supports a set of predefined ACLs, known as canned ACLs. Each\n canned ACL has a predefined set of grantees and permissions. For more information,\n see Canned ACL.

                \n
              • \n
              • \n

                Specify access permissions explicitly using the x-amz-grant-read,\n x-amz-grant-write, x-amz-grant-read-acp,\n x-amz-grant-write-acp, and x-amz-grant-full-control\n headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For\n more information, see Access control list\n (ACL) overview.

                \n

                You specify each grantee as a type=value pair, where the type is one of the\n following:

                \n
                  \n
                • \n

                  \n id – if the value specified is the canonical user ID of an Amazon Web Services account

                  \n
                • \n
                • \n

                  \n uri – if you are granting permissions to a predefined\n group

                  \n
                • \n
                • \n

                  \n emailAddress – if the value specified is the email address of\n an Amazon Web Services account

                  \n \n

                  Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

                  \n
                    \n
                  • \n

                    US East (N. Virginia)

                    \n
                  • \n
                  • \n

                    US West (N. California)

                    \n
                  • \n
                  • \n

                    US West (Oregon)

                    \n
                  • \n
                  • \n

                    Asia Pacific (Singapore)

                    \n
                  • \n
                  • \n

                    Asia Pacific (Sydney)

                    \n
                  • \n
                  • \n

                    Asia Pacific (Tokyo)

                    \n
                  • \n
                  • \n

                    Europe (Ireland)

                    \n
                  • \n
                  • \n

                    South America (São Paulo)

                    \n
                  • \n
                  \n

                  For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

                  \n
                  \n
                • \n
                \n

                For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

                \n

                \n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" \n

                \n
              • \n
              \n \n

              You can use either a canned ACL or specify access permissions explicitly. You cannot\n do both.

              \n
              \n\n

              \n Permissions\n

              \n

              In addition to s3:CreateBucket, the following permissions are required when your CreateBucket includes specific headers:

              \n
                \n
              • \n

                \n ACLs - If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, \n authenticated-read, or if you specify access permissions explicitly through any other ACL, both \n s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL the \n CreateBucket request is private or doesn't specify any ACLs, only s3:CreateBucket permission is needed.

                \n
              • \n
              • \n

                \n Object Lock - If\n ObjectLockEnabledForBucket is set to true in your\n CreateBucket request,\n s3:PutBucketObjectLockConfiguration and\n s3:PutBucketVersioning permissions are required.

                \n
              • \n
              • \n

                \n S3 Object Ownership - If your CreateBucket\n request includes the the x-amz-object-ownership header,\n s3:PutBucketOwnershipControls permission is required.

                \n
              • \n
              \n

              The following operations are related to CreateBucket:

              \n ", + "smithy.api#documentation": "

              Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a\n valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to\n create buckets. By creating the bucket, you become the bucket owner.

              \n

              Not every string is an acceptable bucket name. For information about bucket naming\n restrictions, see Bucket naming rules.

              \n

              If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.

              \n

              By default, the bucket is created in the US East (N. Virginia) Region. You can\n optionally specify a Region in the request body. You might choose a Region to optimize\n latency, minimize costs, or address regulatory requirements. For example, if you reside in\n Europe, you will probably find it advantageous to create buckets in the Europe (Ireland)\n Region. For more information, see Accessing a\n bucket.

              \n \n

              If you send your create bucket request to the s3.amazonaws.com endpoint,\n the request goes to the us-east-1 Region. Accordingly, the signature calculations in\n Signature Version 4 must use us-east-1 as the Region, even if the location constraint in\n the request specifies another Region where the bucket is to be created. If you create a\n bucket in a Region other than US East (N. Virginia), your application must be able to\n handle 307 redirect. For more information, see Virtual hosting of buckets.

              \n
              \n

              \n Access control lists (ACLs)\n

              \n

              When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or\n groups that should be granted specific permissions on the bucket.

              \n \n

              If your CreateBucket request sets bucket owner enforced for S3 Object Ownership and\n specifies a bucket ACL that provides access to an external Amazon Web Services account, your request\n fails with a 400 error and returns the\n InvalidBucketAclWithObjectOwnership error code. For more information,\n see Controlling object\n ownership in the Amazon S3 User Guide.

              \n
              \n

              There are two ways to grant the appropriate permissions using the request headers.

              \n
                \n
              • \n

                Specify a canned ACL using the x-amz-acl request header. Amazon S3\n supports a set of predefined ACLs, known as canned ACLs. Each\n canned ACL has a predefined set of grantees and permissions. For more information,\n see Canned ACL.

                \n
              • \n
              • \n

                Specify access permissions explicitly using the x-amz-grant-read,\n x-amz-grant-write, x-amz-grant-read-acp,\n x-amz-grant-write-acp, and x-amz-grant-full-control\n headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For\n more information, see Access control list\n (ACL) overview.

                \n

                You specify each grantee as a type=value pair, where the type is one of the\n following:

                \n
                  \n
                • \n

                  \n id – if the value specified is the canonical user ID of an Amazon Web Services account

                  \n
                • \n
                • \n

                  \n uri – if you are granting permissions to a predefined\n group

                  \n
                • \n
                • \n

                  \n emailAddress – if the value specified is the email address of\n an Amazon Web Services account

                  \n \n

                  Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

                  \n
                    \n
                  • \n

                    US East (N. Virginia)

                    \n
                  • \n
                  • \n

                    US West (N. California)

                    \n
                  • \n
                  • \n

                    US West (Oregon)

                    \n
                  • \n
                  • \n

                    Asia Pacific (Singapore)

                    \n
                  • \n
                  • \n

                    Asia Pacific (Sydney)

                    \n
                  • \n
                  • \n

                    Asia Pacific (Tokyo)

                    \n
                  • \n
                  • \n

                    Europe (Ireland)

                    \n
                  • \n
                  • \n

                    South America (São Paulo)

                    \n
                  • \n
                  \n

                  For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

                  \n
                  \n
                • \n
                \n

                For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:

                \n

                \n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\" \n

                \n
              • \n
              \n \n

              You can use either a canned ACL or specify access permissions explicitly. You cannot\n do both.

              \n
              \n\n

              \n Permissions\n

              \n

              In addition to s3:CreateBucket, the following permissions are required when your CreateBucket includes specific headers:

              \n
                \n
              • \n

                \n ACLs - If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, \n authenticated-read, or if you specify access permissions explicitly through any other ACL, both \n s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the \n CreateBucket request is private or doesn't specify any ACLs, only s3:CreateBucket permission is needed.

                \n
              • \n
              • \n

                \n Object Lock - If\n ObjectLockEnabledForBucket is set to true in your\n CreateBucket request,\n s3:PutBucketObjectLockConfiguration and\n s3:PutBucketVersioning permissions are required.

                \n
              • \n
              • \n

                \n S3 Object Ownership - If your CreateBucket\n request includes the x-amz-object-ownership header,\n s3:PutBucketOwnershipControls permission is required.

                \n
              • \n
              \n

              The following operations are related to CreateBucket:

              \n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}", @@ -6097,7 +6097,7 @@ "Range": { "target": "com.amazonaws.s3#Range", "traits": { - "smithy.api#documentation": "

              Downloads the specified range bytes of an object. For more information about the HTTP\n Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.

              \n \n

              Amazon S3 doesn't support retrieving multiple ranges of data per GET\n request.

              \n
              ", + "smithy.api#documentation": "

              Because HeadObject returns only the metadata for an object, this parameter\n has no effect.

              ", "smithy.api#httpHeader": "Range" } }, @@ -11952,7 +11952,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "

              This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
              This action is not supported by Amazon S3 on Outposts.
              For more information about Amazon S3 Select, see Selecting Content from Objects in the Amazon S3 User Guide.
              For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide.
              Permissions
              You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.
              Object Data Formats
              You can use Amazon S3 Select to query objects that have the following format properties:
              - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
              - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
              - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
              - Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
                For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
                For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.
              Working with the Response Body
              Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.
              GetObject Support
              The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.
              - Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
              - GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon S3 User Guide.
              Special Errors
              For a list of special errors for this operation, see List of SELECT Object Content Error Codes.
              Related Resources
              ", + "smithy.api#documentation": "
              This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
              This action is not supported by Amazon S3 on Outposts.
              For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.
              For more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select in the Amazon S3 User Guide.
              Permissions
              You must have s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.
              Object Data Formats
              You can use Amazon S3 Select to query objects that have the following format properties:
              - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
              - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
              - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
              - Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
                For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
                For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.
              Working with the Response Body
              Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.
              GetObject Support
              The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.
              - Range: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
              - GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. For more information about storage classes, see Storage Classes in the Amazon S3 User Guide.
              Special Errors
              For a list of special errors for this operation, see List of SELECT Object Content Error Codes.
              Related Resources
              \n ", "smithy.api#http": { "method": "POST", "uri": "/{Bucket}/{Key+}?select&select-type=2&x-id=SelectObjectContent", @@ -12167,12 +12167,12 @@ "KMSMasterKeyID": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "
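A hedged sketch of the operation described above, using the generated @aws-sdk/client-s3 client; the bucket, key, and SQL expression are placeholders, not part of this diff. It declares both serialization formats and consumes the streamed event payload.

```ts
import { S3Client, SelectObjectContentCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

// Bucket, key, SQL expression, and column names are placeholders.
export async function selectRows(): Promise<string> {
  const { Payload } = await s3.send(
    new SelectObjectContentCommand({
      Bucket: "example-bucket",
      Key: "data.csv",
      ExpressionType: "SQL",
      Expression: "SELECT s.name FROM S3Object s WHERE s.city = 'Seattle'",
      // Both the input and the output serialization formats must be declared.
      InputSerialization: { CSV: { FileHeaderInfo: "USE" }, CompressionType: "NONE" },
      OutputSerialization: { CSV: {} },
    })
  );

  // The result is streamed as a series of event messages (Transfer-Encoding: chunked).
  const decoder = new TextDecoder();
  let rows = "";
  for await (const event of Payload ?? []) {
    if (event.Records?.Payload) {
      rows += decoder.decode(event.Records.Payload);
    }
  }
  return rows;
}
```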

              Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms.
              You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if you are using encryption with cross-account operations, you must use a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
              For example:
              - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
              - Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
              Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For more information, see Using symmetric and asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.
              " + "smithy.api#documentation": "
              Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm is set to aws:kms.
              You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if you are using encryption with cross-account or Amazon Web Services service operations, you must use a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
              For example:
              - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
              - Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
              Amazon S3 only supports symmetric KMS keys and not asymmetric KMS keys. For more information, see Using symmetric and asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.
              " } } }, "traits": { - "smithy.api#documentation": "

              Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. For more information, see PUT Bucket encryption in the Amazon S3 API Reference.
              " + "smithy.api#documentation": "
              Describes the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied. If you don't specify a customer managed key at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key in your Amazon Web Services account the first time that you add an object encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for SSE-KMS. For more information, see PUT Bucket encryption in the Amazon S3 API Reference.
              " } }, "com.amazonaws.s3#ServerSideEncryptionConfiguration": { diff --git a/codegen/sdk-codegen/aws-models/sagemaker.json b/codegen/sdk-codegen/aws-models/sagemaker.json index 5331c46b356b..be39693a511c 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker.json +++ b/codegen/sdk-codegen/aws-models/sagemaker.json @@ -1903,7 +1903,7 @@ "ContentType": { "target": "com.amazonaws.sagemaker#ContentType", "traits": { - "smithy.api#documentation": "

              The content type of the data from the input source. You can use text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.
              " + "smithy.api#documentation": "
              The content type of the data from the input source. You can use text/csv;header=present or x-application/vnd.amazon+parquet. The default value is text/csv;header=present.

              " } } }, @@ -2730,6 +2730,16 @@ "smithy.api#pattern": "^[^ ~^:?*\\[]+$" } }, + "com.amazonaws.sagemaker#BucketName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9]$" + } + }, "com.amazonaws.sagemaker#CacheHitResult": { "type": "structure", "members": { @@ -5131,7 +5141,7 @@ "smithy.api#deprecated": { "message": "This property is deprecated, use KmsKeyId instead." }, - "smithy.api#documentation": "

              This member is deprecated and replaced with KmsKeyId.

              " + "smithy.api#documentation": "

              Use KmsKeyId.

              " } }, "KmsKeyId": { @@ -6900,8 +6910,13 @@ "PipelineDefinition": { "target": "com.amazonaws.sagemaker#PipelineDefinition", "traits": { - "smithy.api#documentation": "

              The JSON pipeline definition of the pipeline.

              ", - "smithy.api#required": {} + "smithy.api#documentation": "

              The JSON pipeline definition of the pipeline.

              " + } + }, + "PipelineDefinitionS3Location": { + "target": "com.amazonaws.sagemaker#PipelineDefinitionS3Location", + "traits": { + "smithy.api#documentation": "

              The location of the pipeline definition stored in Amazon S3. If specified, SageMaker will retrieve the pipeline definition from this location.

              " } }, "PipelineDescription": { @@ -6930,6 +6945,12 @@ "traits": { "smithy.api#documentation": "

              A list of tags to apply to the created pipeline.

              " } + }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              This is the configuration that controls the parallelism of the pipeline. If specified, it applies to all runs of this pipeline by default.

              " + } } } }, @@ -11169,7 +11190,7 @@ "smithy.api#deprecated": { "message": "This property is deprecated, use KmsKeyId instead." }, - "smithy.api#documentation": "

              This member is deprecated and replaced with KmsKeyId.

              " + "smithy.api#documentation": "

              Use KmsKeyId.

              " } }, "SubnetIds": { @@ -13961,6 +13982,12 @@ }, "LastModifiedBy": { "target": "com.amazonaws.sagemaker#UserContext" + }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              The parallelism configuration applied to the pipeline.

              " + } } } }, @@ -14044,6 +14071,12 @@ }, "LastModifiedBy": { "target": "com.amazonaws.sagemaker#UserContext" + }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              Lists the parallelism configuration applied to the pipeline.

              " + } } } }, @@ -16102,6 +16135,38 @@ "smithy.api#documentation": "

              Represents the drift check model quality baselines that can be used when the model monitor is set using the model package.

              " } }, + "com.amazonaws.sagemaker#EMRStepMetadata": { + "type": "structure", + "members": { + "ClusterId": { + "target": "com.amazonaws.sagemaker#String256", + "traits": { + "smithy.api#documentation": "

              The identifier of the EMR cluster.

              " + } + }, + "StepId": { + "target": "com.amazonaws.sagemaker#String256", + "traits": { + "smithy.api#documentation": "

              The identifier of the EMR cluster step.

              " + } + }, + "StepName": { + "target": "com.amazonaws.sagemaker#String256", + "traits": { + "smithy.api#documentation": "

              The name of the EMR cluster step.

              " + } + }, + "LogFilePath": { + "target": "com.amazonaws.sagemaker#String1024", + "traits": { + "smithy.api#documentation": "

              The path to the log file where the cluster step's failure root cause is recorded.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              The configurations and outcomes of an Amazon EMR step execution.

              " + } + }, "com.amazonaws.sagemaker#Edge": { "type": "structure", "members": { @@ -20053,7 +20118,7 @@ "FrameworkVersion": { "target": "com.amazonaws.sagemaker#FrameworkVersion", "traits": { - "smithy.api#documentation": "

              Specifies the framework version to use.
              This API field is only supported for PyTorch framework versions 1.4, 1.5, and 1.6 for cloud instance target devices: ml_c4, ml_c5, ml_m4, ml_m5, ml_p2, ml_p3, and ml_g4dn.
              " + "smithy.api#documentation": "
              Specifies the framework version to use. This API field is only supported for the PyTorch and TensorFlow frameworks.
              For information about framework versions supported for cloud targets and edge devices, see Cloud Supported Instance Types and Frameworks and Edge Supported Frameworks.

              " } } }, @@ -20643,6 +20708,16 @@ } } }, + "com.amazonaws.sagemaker#Key": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^.+$" + } + }, "com.amazonaws.sagemaker#KmsKeyId": { "type": "string", "traits": { @@ -26529,6 +26604,14 @@ } } }, + "com.amazonaws.sagemaker#MaxParallelExecutionSteps": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1 + } + } + }, "com.amazonaws.sagemaker#MaxParallelOfTests": { "type": "integer", "traits": { @@ -30116,13 +30199,28 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.sagemaker#ParallelismConfiguration": { + "type": "structure", + "members": { + "MaxParallelExecutionSteps": { + "target": "com.amazonaws.sagemaker#MaxParallelExecutionSteps", + "traits": { + "smithy.api#documentation": "

              The max number of steps that can be executed in parallel.

              ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

              Configuration that controls the parallelism of the pipeline. By default, the parallelism configuration specified applies to all executions of the pipeline unless overridden.

              " + } + }, "com.amazonaws.sagemaker#Parameter": { "type": "structure", "members": { "Name": { "target": "com.amazonaws.sagemaker#PipelineParameterName", "traits": { - "smithy.api#documentation": "

              The name of the parameter to assign a value to. This parameter name must match a named parameter in the pipeline definition.

              ", + "smithy.api#documentation": "

              The name of the parameter to assign a value to. This parameter name must match a named parameter in the pipeline definition.

              ", "smithy.api#required": {} } }, @@ -30532,6 +30630,12 @@ "LastModifiedBy": { "target": "com.amazonaws.sagemaker#UserContext" }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              The parallelism configuration applied to the pipeline.

              " + } + }, "Tags": { "target": "com.amazonaws.sagemaker#TagList", "traits": { @@ -30563,6 +30667,34 @@ "smithy.api#pattern": "(?:[ \\r\\n\\t].*)*$" } }, + "com.amazonaws.sagemaker#PipelineDefinitionS3Location": { + "type": "structure", + "members": { + "Bucket": { + "target": "com.amazonaws.sagemaker#BucketName", + "traits": { + "smithy.api#documentation": "

              Name of the S3 bucket.

              ", + "smithy.api#required": {} + } + }, + "ObjectKey": { + "target": "com.amazonaws.sagemaker#Key", + "traits": { + "smithy.api#documentation": "

              The object key (or key name) uniquely identifies the object in an S3 bucket.

              ", + "smithy.api#required": {} + } + }, + "VersionId": { + "target": "com.amazonaws.sagemaker#VersionId", + "traits": { + "smithy.api#documentation": "

              Version Id of the pipeline definition file. If not specified, Amazon SageMaker will retrieve the latest version.

              " + } + } + }, + "traits": { + "smithy.api#documentation": "

              The location of the pipeline definition stored in Amazon S3.

              " + } + }, "com.amazonaws.sagemaker#PipelineDescription": { "type": "string", "traits": { @@ -30633,6 +30765,12 @@ "LastModifiedBy": { "target": "com.amazonaws.sagemaker#UserContext" }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              The parallelism configuration applied to the pipeline execution.

              " + } + }, "PipelineParameters": { "target": "com.amazonaws.sagemaker#ParameterList", "traits": { @@ -30720,6 +30858,18 @@ "smithy.api#documentation": "

              The name of the step that is executed.

              " } }, + "StepDisplayName": { + "target": "com.amazonaws.sagemaker#StepDisplayName", + "traits": { + "smithy.api#documentation": "

              The display name of the step.

              " + } + }, + "StepDescription": { + "target": "com.amazonaws.sagemaker#StepDescription", + "traits": { + "smithy.api#documentation": "

              The description of the step.

              " + } + }, "StartTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { @@ -30745,7 +30895,10 @@ } }, "AttemptCount": { - "target": "com.amazonaws.sagemaker#IntegerValue" + "target": "com.amazonaws.sagemaker#IntegerValue", + "traits": { + "smithy.api#documentation": "

              The current attempt of the execution step. For more information, see Retry Policy for Amazon SageMaker Pipelines steps.

              " + } }, "FailureReason": { "target": "com.amazonaws.sagemaker#FailureReason", @@ -30844,6 +30997,12 @@ "traits": { "smithy.api#documentation": "

              Container for the metadata for a Clarify check step. The configurations and outcomes of the check step execution. This includes:
              - The type of the check conducted,
              - The Amazon S3 URIs of baseline constraints and statistics files to be used for the drift check.
              - The Amazon S3 URIs of newly calculated baseline constraints and statistics.
              - The model package group name provided.
              - The Amazon S3 URI of the violation report if violations detected.
              - The Amazon Resource Name (ARN) of check processing job initiated by the step execution.
              - The boolean flags indicating if the drift check is skipped.
              - If step property BaselineUsedForDriftCheck is set the same as CalculatedBaseline.
              " } + }, + "EMR": { + "target": "com.amazonaws.sagemaker#EMRStepMetadata", + "traits": { + "smithy.api#documentation": "

              The configurations and outcomes of an EMR step execution.

              " + } } }, "traits": { @@ -34322,6 +34481,12 @@ "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } + }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              This configuration, if specified, overrides the parallelism configuration of the parent pipeline.

              " + } } } }, @@ -36425,6 +36590,12 @@ "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } + }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              This configuration, if specified, overrides the parallelism configuration of the parent pipeline for this specific run.

              " + } } } }, @@ -36452,7 +36623,17 @@ "com.amazonaws.sagemaker#StatusMessage": { "type": "string" }, - "com.amazonaws.sagemaker#StepName": { + "com.amazonaws.sagemaker#StepDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 3072 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.sagemaker#StepDisplayName": { "type": "string", "traits": { "smithy.api#length": { @@ -36462,6 +36643,16 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.sagemaker#StepName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 64 + }, + "smithy.api#pattern": "^[A-Za-z0-9\\-_]*$" + } + }, "com.amazonaws.sagemaker#StepStatus": { "type": "string", "traits": { @@ -36705,7 +36896,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Stops a pipeline execution.
              Callback Step
              A pipeline execution won't stop while a callback step is running. When you call StopPipelineExecution on a pipeline execution with a running callback step, SageMaker Pipelines sends an additional Amazon SQS message to the specified SQS queue. The body of the SQS message contains a \"Status\" field which is set to \"Stopping\".
              You should add logic to your Amazon SQS message consumer to take any needed action (for example, resource cleanup) upon receipt of the message followed by a call to SendPipelineExecutionStepSuccess or SendPipelineExecutionStepFailure.
              Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.
              Lambda Step
              A pipeline execution can't be stopped while a lambda step is running because the Lambda function invoked by the lambda step can't be stopped. If you attempt to stop the execution while the Lambda function is running, the pipeline waits for the Lambda function to finish or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function finishes, the pipeline execution status is Stopped. If the timeout is hit the pipeline execution status is Failed.
              " + "smithy.api#documentation": "
              Stops a pipeline execution.
              Callback Step
              A pipeline execution won't stop while a callback step is running. When you call StopPipelineExecution on a pipeline execution with a running callback step, Amazon SageMaker Pipelines sends an additional Amazon SQS message to the specified SQS queue. The body of the SQS message contains a \"Status\" field which is set to \"Stopping\".
              You should add logic to your Amazon SQS message consumer to take any needed action (for example, resource cleanup) upon receipt of the message followed by a call to SendPipelineExecutionStepSuccess or SendPipelineExecutionStepFailure.
              Only when Amazon SageMaker Pipelines receives one of these calls will it stop the pipeline execution.
              Lambda Step
              A pipeline execution can't be stopped while a lambda step is running because the Lambda function invoked by the lambda step can't be stopped. If you attempt to stop the execution while the Lambda function is running, the pipeline waits for the Lambda function to finish or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function finishes, the pipeline execution status is Stopped. If the timeout is hit the pipeline execution status is Failed.
              " } }, "com.amazonaws.sagemaker#StopPipelineExecutionRequest": { @@ -36802,7 +36993,7 @@ } ], "traits": { - "smithy.api#documentation": "

              Stops a transform job.
              When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to Stopping. After Amazon SageMaker stops the job, the status is set to Stopped. When you stop a transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.
              " + "smithy.api#documentation": "
              Stops a batch transform job.
              When Amazon SageMaker receives a StopTransformJob request, the status of the job changes to Stopping. After Amazon SageMaker stops the job, the status is set to Stopped. When you stop a batch transform job before it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.

              " } }, "com.amazonaws.sagemaker#StopTransformJobRequest": { @@ -36811,7 +37002,7 @@ "TransformJobName": { "target": "com.amazonaws.sagemaker#TransformJobName", "traits": { - "smithy.api#documentation": "

              The name of the transform job to stop.

              ", + "smithy.api#documentation": "

              The name of the batch transform job to stop.

              ", "smithy.api#required": {} } } @@ -37928,6 +38119,38 @@ { "value": "ml.c5n.18xlarge", "name": "ML_C5N_18XLARGE" + }, + { + "value": "ml.g5.xlarge", + "name": "ML_G5_XLARGE" + }, + { + "value": "ml.g5.2xlarge", + "name": "ML_G5_2XLARGE" + }, + { + "value": "ml.g5.4xlarge", + "name": "ML_G5_4XLARGE" + }, + { + "value": "ml.g5.8xlarge", + "name": "ML_G5_8XLARGE" + }, + { + "value": "ml.g5.16xlarge", + "name": "ML_G5_16XLARGE" + }, + { + "value": "ml.g5.12xlarge", + "name": "ML_G5_12XLARGE" + }, + { + "value": "ml.g5.24xlarge", + "name": "ML_G5_24XLARGE" + }, + { + "value": "ml.g5.48xlarge", + "name": "ML_G5_48XLARGE" } ] } @@ -40862,6 +41085,12 @@ "traits": { "smithy.api#documentation": "

              The display name of the pipeline execution.

              " } + }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              This configuration, if specified, overrides the parallelism configuration of the parent pipeline for this specific run.

              " + } } } }, @@ -40898,6 +41127,12 @@ "smithy.api#documentation": "

              The JSON pipeline definition.

              " } }, + "PipelineDefinitionS3Location": { + "target": "com.amazonaws.sagemaker#PipelineDefinitionS3Location", + "traits": { + "smithy.api#documentation": "

              The location of the pipeline definition stored in Amazon S3. If specified, SageMaker will retrieve the pipeline definition from this location.

              " + } + }, "PipelineDescription": { "target": "com.amazonaws.sagemaker#PipelineDescription", "traits": { @@ -40909,6 +41144,12 @@ "traits": { "smithy.api#documentation": "

              The Amazon Resource Name (ARN) that the pipeline uses to execute.

              " } + }, + "ParallelismConfiguration": { + "target": "com.amazonaws.sagemaker#ParallelismConfiguration", + "traits": { + "smithy.api#documentation": "

              If specified, it applies to all executions of this pipeline by default.

              " + } } } }, @@ -41655,6 +41896,16 @@ } } }, + "com.amazonaws.sagemaker#VersionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^.+$" + } + }, "com.amazonaws.sagemaker#VersionedArnOrName": { "type": "string", "traits": { diff --git a/codegen/sdk-codegen/aws-models/snowball.json b/codegen/sdk-codegen/aws-models/snowball.json index 3af40ed12ae2..20d3333a8f17 100644 --- a/codegen/sdk-codegen/aws-models/snowball.json +++ b/codegen/sdk-codegen/aws-models/snowball.json @@ -539,7 +539,10 @@ "type": "structure", "members": { "ConflictResource": { - "target": "com.amazonaws.snowball#String" + "target": "com.amazonaws.snowball#String", + "traits": { + "smithy.api#documentation": "

              You get this resource when you call CreateReturnShippingLabel more than once while other requests are not completed.

              " + } }, "Message": { "target": "com.amazonaws.snowball#String" @@ -1343,6 +1346,10 @@ "com.amazonaws.snowball#GSTIN": { "type": "string", "traits": { + "smithy.api#length": { + "min": 15, + "max": 15 + }, "smithy.api#pattern": "^\\d{2}[A-Z]{5}\\d{4}[A-Z]{1}[A-Z\\d]{1}[Z]{1}[A-Z\\d]{1}$" } }, @@ -2783,7 +2790,8 @@ "smithy.api#length": { "min": 1, "max": 1024 - } + }, + "smithy.api#pattern": ".*" } }, "com.amazonaws.snowball#TGWOnDeviceServiceConfiguration": { diff --git a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json index 872329c00643..7d58ca4198f1 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json +++ b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json @@ -7527,22 +7527,134 @@ }, "lambda": { "endpoints": { - "af-south-1": {}, - "ap-east-1": {}, - "ap-northeast-1": {}, - "ap-northeast-2": {}, - "ap-northeast-3": {}, - "ap-south-1": {}, - "ap-southeast-1": {}, - "ap-southeast-2": {}, - "ap-southeast-3": {}, - "ca-central-1": {}, - "eu-central-1": {}, - "eu-north-1": {}, - "eu-south-1": {}, - "eu-west-1": {}, - "eu-west-2": {}, - "eu-west-3": {}, + "af-south-1": { + "variants": [ + { + "hostname": "lambda.af-south-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ap-east-1": { + "variants": [ + { + "hostname": "lambda.ap-east-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ap-northeast-1": { + "variants": [ + { + "hostname": "lambda.ap-northeast-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ap-northeast-2": { + "variants": [ + { + "hostname": "lambda.ap-northeast-2.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ap-northeast-3": { + "variants": [ + { + "hostname": "lambda.ap-northeast-3.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ap-south-1": { + "variants": [ + { + "hostname": "lambda.ap-south-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ap-southeast-1": { + "variants": [ + { + "hostname": "lambda.ap-southeast-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ap-southeast-2": { + "variants": [ + { + "hostname": "lambda.ap-southeast-2.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ap-southeast-3": { + "variants": [ + { + "hostname": "lambda.ap-southeast-3.api.aws", + "tags": ["dualstack"] + } + ] + }, + "ca-central-1": { + "variants": [ + { + "hostname": "lambda.ca-central-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "eu-central-1": { + "variants": [ + { + "hostname": "lambda.eu-central-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "eu-north-1": { + "variants": [ + { + "hostname": "lambda.eu-north-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "eu-south-1": { + "variants": [ + { + "hostname": "lambda.eu-south-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "eu-west-1": { + "variants": [ + { + "hostname": "lambda.eu-west-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "eu-west-2": { + "variants": [ + { + "hostname": "lambda.eu-west-2.api.aws", + "tags": ["dualstack"] + } + ] + }, + "eu-west-3": { + "variants": [ + { + "hostname": "lambda.eu-west-3.api.aws", + "tags": ["dualstack"] + } + ] + }, "fips-us-east-1": { "credentialScope": { "region": "us-east-1" @@ -7571,13 +7683,31 @@ "deprecated": true, "hostname": "lambda-fips.us-west-2.amazonaws.com" }, - "me-south-1": {}, - "sa-east-1": {}, + 
"me-south-1": { + "variants": [ + { + "hostname": "lambda.me-south-1.api.aws", + "tags": ["dualstack"] + } + ] + }, + "sa-east-1": { + "variants": [ + { + "hostname": "lambda.sa-east-1.api.aws", + "tags": ["dualstack"] + } + ] + }, "us-east-1": { "variants": [ { "hostname": "lambda-fips.us-east-1.amazonaws.com", "tags": ["fips"] + }, + { + "hostname": "lambda.us-east-1.api.aws", + "tags": ["dualstack"] } ] }, @@ -7586,6 +7716,10 @@ { "hostname": "lambda-fips.us-east-2.amazonaws.com", "tags": ["fips"] + }, + { + "hostname": "lambda.us-east-2.api.aws", + "tags": ["dualstack"] } ] }, @@ -7594,6 +7728,10 @@ { "hostname": "lambda-fips.us-west-1.amazonaws.com", "tags": ["fips"] + }, + { + "hostname": "lambda.us-west-1.api.aws", + "tags": ["dualstack"] } ] }, @@ -7602,6 +7740,10 @@ { "hostname": "lambda-fips.us-west-2.amazonaws.com", "tags": ["fips"] + }, + { + "hostname": "lambda.us-west-2.api.aws", + "tags": ["dualstack"] } ] } @@ -11000,6 +11142,7 @@ "ap-south-1": {}, "ap-southeast-1": {}, "ap-southeast-2": {}, + "ap-southeast-3": {}, "ca-central-1": {}, "eu-central-1": {}, "eu-north-1": {}, @@ -14339,8 +14482,22 @@ }, "lambda": { "endpoints": { - "cn-north-1": {}, - "cn-northwest-1": {} + "cn-north-1": { + "variants": [ + { + "hostname": "lambda.cn-north-1.api.amazonwebservices.com.cn", + "tags": ["dualstack"] + } + ] + }, + "cn-northwest-1": { + "variants": [ + { + "hostname": "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + "tags": ["dualstack"] + } + ] + } } }, "license-manager": {