diff --git a/.changelog/36227b137362432682ac126094d22187.json b/.changelog/36227b137362432682ac126094d22187.json new file mode 100644 index 00000000000..ad07e199d14 --- /dev/null +++ b/.changelog/36227b137362432682ac126094d22187.json @@ -0,0 +1,8 @@ +{ + "id": "36227b13-7362-4326-82ac-126094d22187", + "type": "documentation", + "description": "Documentation updates for Amazon ECS.", + "modules": [ + "service/ecs" + ] +} \ No newline at end of file diff --git a/.changelog/37cd600879e4457da5697553f9145222.json b/.changelog/37cd600879e4457da5697553f9145222.json new file mode 100644 index 00000000000..b6548c37875 --- /dev/null +++ b/.changelog/37cd600879e4457da5697553f9145222.json @@ -0,0 +1,8 @@ +{ + "id": "37cd6008-79e4-457d-a569-7553f9145222", + "type": "feature", + "description": "Introduced a new clientToken request parameter on CreateNetworkAcl and CreateRouteTable APIs. The clientToken parameter allows idempotent operations on the APIs.", + "modules": [ + "service/ec2" + ] +} \ No newline at end of file diff --git a/.changelog/6b8bfd62832a429caf33ec3e5f7f740f.json b/.changelog/6b8bfd62832a429caf33ec3e5f7f740f.json new file mode 100644 index 00000000000..0616cdccb39 --- /dev/null +++ b/.changelog/6b8bfd62832a429caf33ec3e5f7f740f.json @@ -0,0 +1,8 @@ +{ + "id": "6b8bfd62-832a-429c-af33-ec3e5f7f740f", + "type": "feature", + "description": "DeviceSerialNumber parameter is now optional in StartConnection API", + "modules": [ + "service/outposts" + ] +} \ No newline at end of file diff --git a/.changelog/815f39c4c48d4c4b95ab3e9dc9cd5e19.json b/.changelog/815f39c4c48d4c4b95ab3e9dc9cd5e19.json new file mode 100644 index 00000000000..96b273f6a6c --- /dev/null +++ b/.changelog/815f39c4c48d4c4b95ab3e9dc9cd5e19.json @@ -0,0 +1,8 @@ +{ + "id": "815f39c4-c48d-4c4b-95ab-3e9dc9cd5e19", + "type": "feature", + "description": "This release adds support for Aurora Limitless Database.", + "modules": [ + "service/rds" + ] +} \ No newline at end of file diff --git a/.changelog/a81ad3b247404bd8818638c35c679ce6.json b/.changelog/a81ad3b247404bd8818638c35c679ce6.json new file mode 100644 index 00000000000..509e63bb14f --- /dev/null +++ b/.changelog/a81ad3b247404bd8818638c35c679ce6.json @@ -0,0 +1,8 @@ +{ + "id": "a81ad3b2-4740-4bd8-8186-38c35c679ce6", + "type": "feature", + "description": "Add DeprecationDate and SoftwareVersion to response of ListGateways.", + "modules": [ + "service/storagegateway" + ] +} \ No newline at end of file diff --git a/service/ec2/api_op_CreateEgressOnlyInternetGateway.go b/service/ec2/api_op_CreateEgressOnlyInternetGateway.go index f440022c767..9b8620a3c50 100644 --- a/service/ec2/api_op_CreateEgressOnlyInternetGateway.go +++ b/service/ec2/api_op_CreateEgressOnlyInternetGateway.go @@ -39,7 +39,7 @@ type CreateEgressOnlyInternetGatewayInput struct { VpcId *string // Unique, case-sensitive identifier that you provide to ensure the idempotency of - // the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) // . 
ClientToken *string diff --git a/service/ec2/api_op_CreateNatGateway.go b/service/ec2/api_op_CreateNatGateway.go index e841db05bd6..ee6981b828a 100644 --- a/service/ec2/api_op_CreateNatGateway.go +++ b/service/ec2/api_op_CreateNatGateway.go @@ -61,7 +61,7 @@ type CreateNatGatewayInput struct { AllocationId *string // Unique, case-sensitive identifier that you provide to ensure the idempotency of - // the request. For more information, see How to ensure idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html) + // the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html) // . Constraint: Maximum 64 ASCII characters. ClientToken *string diff --git a/service/ec2/api_op_CreateNetworkAcl.go b/service/ec2/api_op_CreateNetworkAcl.go index 7ed64730d4a..3d126ffde75 100644 --- a/service/ec2/api_op_CreateNetworkAcl.go +++ b/service/ec2/api_op_CreateNetworkAcl.go @@ -38,6 +38,11 @@ type CreateNetworkAclInput struct { // This member is required. VpcId *string + // Unique, case-sensitive identifier that you provide to ensure the idempotency of + // the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // . + ClientToken *string + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have the // required permissions, the error response is DryRunOperation . Otherwise, it is @@ -52,6 +57,10 @@ type CreateNetworkAclInput struct { type CreateNetworkAclOutput struct { + // Unique, case-sensitive identifier to ensure the idempotency of the request. + // Only returned if a client token was provided in the request. + ClientToken *string + // Information about the network ACL. 
NetworkAcl *types.NetworkAcl @@ -116,6 +125,9 @@ func (c *Client) addOperationCreateNetworkAclMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addIdempotencyToken_opCreateNetworkAclMiddleware(stack, options); err != nil { + return err + } if err = addOpCreateNetworkAclValidationMiddleware(stack); err != nil { return err } @@ -140,6 +152,39 @@ func (c *Client) addOperationCreateNetworkAclMiddlewares(stack *middleware.Stack return nil } +type idempotencyToken_initializeOpCreateNetworkAcl struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateNetworkAcl) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateNetworkAcl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateNetworkAclInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateNetworkAclInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateNetworkAclMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateNetworkAcl{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + func newServiceMetadataMiddleware_opCreateNetworkAcl(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/service/ec2/api_op_CreateRouteTable.go b/service/ec2/api_op_CreateRouteTable.go index 816cb6a2a67..d73e6f5dccf 100644 --- a/service/ec2/api_op_CreateRouteTable.go +++ b/service/ec2/api_op_CreateRouteTable.go @@ -38,6 +38,11 @@ type CreateRouteTableInput struct { // This member is required. VpcId *string + // Unique, case-sensitive identifier that you provide to ensure the idempotency of + // the request. For more information, see Ensuring idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html) + // . + ClientToken *string + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have the // required permissions, the error response is DryRunOperation . Otherwise, it is @@ -52,6 +57,10 @@ type CreateRouteTableInput struct { type CreateRouteTableOutput struct { + // Unique, case-sensitive identifier to ensure the idempotency of the request. + // Only returned if a client token was provided in the request. + ClientToken *string + // Information about the route table. 
RouteTable *types.RouteTable @@ -116,6 +125,9 @@ func (c *Client) addOperationCreateRouteTableMiddlewares(stack *middleware.Stack if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } + if err = addIdempotencyToken_opCreateRouteTableMiddleware(stack, options); err != nil { + return err + } if err = addOpCreateRouteTableValidationMiddleware(stack); err != nil { return err } @@ -140,6 +152,39 @@ func (c *Client) addOperationCreateRouteTableMiddlewares(stack *middleware.Stack return nil } +type idempotencyToken_initializeOpCreateRouteTable struct { + tokenProvider IdempotencyTokenProvider +} + +func (*idempotencyToken_initializeOpCreateRouteTable) ID() string { + return "OperationIdempotencyTokenAutoFill" +} + +func (m *idempotencyToken_initializeOpCreateRouteTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.tokenProvider == nil { + return next.HandleInitialize(ctx, in) + } + + input, ok := in.Parameters.(*CreateRouteTableInput) + if !ok { + return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateRouteTableInput ") + } + + if input.ClientToken == nil { + t, err := m.tokenProvider.GetIdempotencyToken() + if err != nil { + return out, metadata, err + } + input.ClientToken = &t + } + return next.HandleInitialize(ctx, in) +} +func addIdempotencyToken_opCreateRouteTableMiddleware(stack *middleware.Stack, cfg Options) error { + return stack.Initialize.Add(&idempotencyToken_initializeOpCreateRouteTable{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before) +} + func newServiceMetadataMiddleware_opCreateRouteTable(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, diff --git a/service/ec2/deserializers.go b/service/ec2/deserializers.go index aa486aff734..509accbb62a 100644 --- a/service/ec2/deserializers.go +++ b/service/ec2/deserializers.go @@ -142777,6 +142777,19 @@ func awsEc2query_deserializeOpDocumentCreateNetworkAclOutput(v **CreateNetworkAc originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("clientToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ClientToken = ptr.String(xtv) + } + case strings.EqualFold("networkAcl", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentNetworkAcl(&sv.NetworkAcl, nodeDecoder); err != nil { @@ -143282,6 +143295,19 @@ func awsEc2query_deserializeOpDocumentCreateRouteTableOutput(v **CreateRouteTabl originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { + case strings.EqualFold("clientToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ClientToken = ptr.String(xtv) + } + case strings.EqualFold("routeTable", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) if err := awsEc2query_deserializeDocumentRouteTable(&sv.RouteTable, nodeDecoder); err != nil { diff --git a/service/ec2/serializers.go b/service/ec2/serializers.go index f92d0ffcd61..892ed1d9a5e 100644 --- a/service/ec2/serializers.go +++ b/service/ec2/serializers.go @@ -51699,6 +51699,11 @@ func 
awsEc2query_serializeOpDocumentCreateNetworkAclInput(v *CreateNetworkAclInp object := value.Object() _ = object + if v.ClientToken != nil { + objectKey := object.Key("ClientToken") + objectKey.String(*v.ClientToken) + } + if v.DryRun != nil { objectKey := object.Key("DryRun") objectKey.Boolean(*v.DryRun) @@ -52224,6 +52229,11 @@ func awsEc2query_serializeOpDocumentCreateRouteTableInput(v *CreateRouteTableInp object := value.Object() _ = object + if v.ClientToken != nil { + objectKey := object.Key("ClientToken") + objectKey.String(*v.ClientToken) + } + if v.DryRun != nil { objectKey := object.Key("DryRun") objectKey.Boolean(*v.DryRun) diff --git a/service/ec2/types/enums.go b/service/ec2/types/enums.go index 05f2e59d094..b9c15667c72 100644 --- a/service/ec2/types/enums.go +++ b/service/ec2/types/enums.go @@ -582,6 +582,7 @@ const ( AvailabilityZoneStateInformation AvailabilityZoneState = "information" AvailabilityZoneStateImpaired AvailabilityZoneState = "impaired" AvailabilityZoneStateUnavailable AvailabilityZoneState = "unavailable" + AvailabilityZoneStateConstrained AvailabilityZoneState = "constrained" ) // Values returns all known values for AvailabilityZoneState. Note that this can @@ -593,6 +594,7 @@ func (AvailabilityZoneState) Values() []AvailabilityZoneState { "information", "impaired", "unavailable", + "constrained", } } @@ -7292,8 +7294,9 @@ type SubnetState string // Enum values for SubnetState const ( - SubnetStatePending SubnetState = "pending" - SubnetStateAvailable SubnetState = "available" + SubnetStatePending SubnetState = "pending" + SubnetStateAvailable SubnetState = "available" + SubnetStateUnavailable SubnetState = "unavailable" ) // Values returns all known values for SubnetState. Note that this can be expanded @@ -7303,6 +7306,7 @@ func (SubnetState) Values() []SubnetState { return []SubnetState{ "pending", "available", + "unavailable", } } diff --git a/service/ecs/api_op_CreateTaskSet.go b/service/ecs/api_op_CreateTaskSet.go index 7596b4c09cc..39d0d7bda5c 100644 --- a/service/ecs/api_op_CreateTaskSet.go +++ b/service/ecs/api_op_CreateTaskSet.go @@ -15,8 +15,9 @@ import ( // Create a task set in the specified cluster and service. This is used when a // service uses the EXTERNAL deployment controller type. For more information, see // Amazon ECS deployment types (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-types.html) -// in the Amazon Elastic Container Service Developer Guide. You can create a -// maximum of 5 tasks sets for a deployment. +// in the Amazon Elastic Container Service Developer Guide. For information about +// the maximum number of task sets and other quotas, see Amazon ECS service quotas (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-quotas.html) +// in the Amazon Elastic Container Service Developer Guide. func (c *Client) CreateTaskSet(ctx context.Context, params *CreateTaskSetInput, optFns ...func(*Options)) (*CreateTaskSetOutput, error) { if params == nil { params = &CreateTaskSetInput{} } diff --git a/service/ecs/api_op_StopTask.go b/service/ecs/api_op_StopTask.go index 286adb585f8..0ae42817eaf 100644 --- a/service/ecs/api_op_StopTask.go +++ b/service/ecs/api_op_StopTask.go @@ -51,7 +51,7 @@ type StopTaskInput struct { // An optional message specified when a task is stopped. For example, if you're // using a custom scheduler, you can use this parameter to specify the reason for // stopping the task here, and the message appears in subsequent DescribeTasks API - // operations on this task.
Up to 255 characters are allowed in this message. + // operations on this task. Reason *string noSmithyDocumentSerde diff --git a/service/ecs/types/types.go b/service/ecs/types/types.go index 32aadaecb6c..0fc3ac22de2 100644 --- a/service/ecs/types/types.go +++ b/service/ecs/types/types.go @@ -140,7 +140,9 @@ type AutoScalingGroupProviderUpdate struct { noSmithyDocumentSerde } -// An object representing the networking details for a task or service. +// An object representing the networking details for a task or service. For +// example +// awsvpcConfiguration={subnets=["subnet-12344321"],securityGroups=["sg-12344321"]} type AwsVpcConfiguration struct { // The IDs of the subnets associated with the task or service. There's a limit of @@ -1034,7 +1036,7 @@ type ContainerDefinition struct { // system with the exception of the nofile resource limit parameter which Fargate // overrides. The nofile resource limit sets a restriction on the number of open // files that a container can use. The default nofile soft limit is 1024 and the - // default hard limit is 4096 . This parameter requires version 1.18 of the Docker + // default hard limit is 65535 . This parameter requires version 1.18 of the Docker // Remote API or greater on your container instance. To check the Docker Remote API // version on your container instance, log in to your container instance and run // the following command: sudo docker version --format '{{.Server.APIVersion}}' @@ -1091,6 +1093,10 @@ type ContainerDefinition struct { // Fargate launch type, the task or service requires the following platforms: // - Linux platform version 1.3.0 or later. // - Windows platform version 1.0.0 or later. +// +// For more information about how to create a container dependency, see Container +// dependency (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/example_task_definitions.html#example_task_definition-containerdependency) +// in the Amazon Elastic Container Service Developer Guide. type ContainerDependency struct { // The dependency condition of the container. The following are the available @@ -1551,7 +1557,7 @@ type DeploymentConfiguration struct { // healthy when all essential containers within the task have passed their health // checks. The amount of time the service scheduler can wait for is determined by // the container health check settings. - // For services are that do use a load balancer, the following should be noted: + // For services that do use a load balancer, the following should be noted: // - If a task has no essential containers with a health check defined, the // service scheduler will wait for the load balancer target group health check to // return a healthy status before counting the task towards the minimum healthy @@ -3270,7 +3276,8 @@ type ServiceConnectService struct { // Connect. Timeout *TimeoutConfiguration - // An object that represents the configuration for Service Connect TLS. + // A reference to an object that represents a Transport Layer Security (TLS) + // configuration. Tls *ServiceConnectTlsConfiguration noSmithyDocumentSerde @@ -3764,13 +3771,7 @@ type Task struct { // The stop code indicating why a task was stopped. The stoppedReason might // contain additional details. For more information about stop code, see Stopped // tasks error codes (https://docs.aws.amazon.com/AmazonECS/latest/userguide/stopped-task-error-codes.html) - // in the Amazon ECS User Guide. 
The following are valid values: - // - TaskFailedToStart - // - EssentialContainerExited - // - UserInitiated - // - TerminationNotice - // - ServiceSchedulerInitiated - // - SpotInterruption + // in the Amazon ECS User Guide. StopCode TaskStopCode // The Unix timestamp for the time when the task was stopped. More specifically, @@ -4429,7 +4430,7 @@ type Tmpfs struct { // the exception of the nofile resource limit parameter which Fargate overrides. // The nofile resource limit sets a restriction on the number of open files that a // container can use. The default nofile soft limit is 1024 and the default hard -// limit is 4096 . You can specify the ulimit settings for a container in a task +// limit is 65535 . You can specify the ulimit settings for a container in a task // definition. type Ulimit struct { diff --git a/service/outposts/api_op_StartConnection.go b/service/outposts/api_op_StartConnection.go index 611c402cadd..4e1fd58d8a6 100644 --- a/service/outposts/api_op_StartConnection.go +++ b/service/outposts/api_op_StartConnection.go @@ -46,16 +46,14 @@ type StartConnectionInput struct { // This member is required. ClientPublicKey *string - // The serial number of the dongle. - // - // This member is required. - DeviceSerialNumber *string - // The device index of the network interface on the Outpost server. // // This member is required. NetworkInterfaceDeviceIndex int32 + // The serial number of the dongle. + DeviceSerialNumber *string + noSmithyDocumentSerde } diff --git a/service/outposts/validators.go b/service/outposts/validators.go index 2cdafb97741..5dd06574253 100644 --- a/service/outposts/validators.go +++ b/service/outposts/validators.go @@ -817,9 +817,6 @@ func validateOpStartConnectionInput(v *StartConnectionInput) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "StartConnectionInput"} - if v.DeviceSerialNumber == nil { - invalidParams.Add(smithy.NewErrParamRequired("DeviceSerialNumber")) - } if v.AssetId == nil { invalidParams.Add(smithy.NewErrParamRequired("AssetId")) } diff --git a/service/rds/api_op_CreateBlueGreenDeployment.go b/service/rds/api_op_CreateBlueGreenDeployment.go index 19fc062072c..8a14c57e70a 100644 --- a/service/rds/api_op_CreateBlueGreenDeployment.go +++ b/service/rds/api_op_CreateBlueGreenDeployment.go @@ -68,7 +68,12 @@ type CreateBlueGreenDeploymentInput struct { // group that is different from the one associated with the source DB cluster. TargetDBClusterParameterGroupName *string - // Specify the DB instance class for the databases in the green environment. + // Specify the DB instance class for the databases in the green environment. This + // parameter only applies to RDS DB instances, because DB instances within an + // Aurora DB cluster can have multiple different instance classes. If you're + // creating a blue/green deployment from an Aurora DB cluster, don't specify this + // parameter. After the green environment is created, you can individually modify + // the instance classes of the DB instances within the green DB cluster. TargetDBInstanceClass *string // The DB parameter group associated with the DB instance in the green diff --git a/service/rds/api_op_CreateDBCluster.go b/service/rds/api_op_CreateDBCluster.go index 97a073fb950..424ec9883c1 100644 --- a/service/rds/api_op_CreateDBCluster.go +++ b/service/rds/api_op_CreateDBCluster.go @@ -190,6 +190,11 @@ type CreateDBClusterInput struct { // in the Amazon Aurora User Guide. 
Valid for Cluster Type: Aurora DB clusters only EnableIAMDatabaseAuthentication *bool + // Specifies whether to enable Aurora Limitless Database. You must enable Aurora + // Limitless Database to create a DB shard group. Valid for: Aurora DB clusters + // only + EnableLimitlessDatabase *bool + // Specifies whether read replicas can forward write operations to the writer DB // instance in the DB cluster. By default, write operations aren't allowed on // reader DB instances. Valid for: Aurora DB clusters only diff --git a/service/rds/api_op_CreateDBShardGroup.go b/service/rds/api_op_CreateDBShardGroup.go new file mode 100644 index 00000000000..a86e190300f --- /dev/null +++ b/service/rds/api_op_CreateDBShardGroup.go @@ -0,0 +1,219 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package rds + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a new DB shard group for Aurora Limitless Database. You must enable +// Aurora Limitless Database to create a DB shard group. Valid for: Aurora DB +// clusters only +func (c *Client) CreateDBShardGroup(ctx context.Context, params *CreateDBShardGroupInput, optFns ...func(*Options)) (*CreateDBShardGroupOutput, error) { + if params == nil { + params = &CreateDBShardGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "CreateDBShardGroup", params, optFns, c.addOperationCreateDBShardGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*CreateDBShardGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type CreateDBShardGroupInput struct { + + // The name of the primary DB cluster for the DB shard group. + // + // This member is required. + DBClusterIdentifier *string + + // The name of the DB shard group. + // + // This member is required. + DBShardGroupIdentifier *string + + // The maximum capacity of the DB shard group in Aurora capacity units (ACUs). + // + // This member is required. + MaxACU *float64 + + // Specifies whether to create standby instances for the DB shard group. Valid + // values are the following: + // - 0 - Creates a single, primary DB instance for each physical shard. This is + // the default value, and the only one supported for the preview. + // - 1 - Creates a primary DB instance and a standby instance in a different + // Availability Zone (AZ) for each physical shard. + // - 2 - Creates a primary DB instance and two standby instances in different + // AZs for each physical shard. + ComputeRedundancy *int32 + + // Specifies whether the DB shard group is publicly accessible. When the DB shard + // group is publicly accessible, its Domain Name System (DNS) endpoint resolves to + // the private IP address from within the DB shard group's virtual private cloud + // (VPC). It resolves to the public IP address from outside of the DB shard group's + // VPC. Access to the DB shard group is ultimately controlled by the security group + // it uses. That public access is not permitted if the security group assigned to + // the DB shard group doesn't permit it. When the DB shard group isn't publicly + // accessible, it is an internal DB shard group with a DNS name that resolves to a + // private IP address. Default: The default behavior varies depending on whether + // DBSubnetGroupName is specified. 
If DBSubnetGroupName isn't specified, and + // PubliclyAccessible isn't specified, the following applies: + // - If the default VPC in the target Region doesn’t have an internet gateway + // attached to it, the DB shard group is private. + // - If the default VPC in the target Region has an internet gateway attached to + // it, the DB shard group is public. + // If DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the + // following applies: + // - If the subnets are part of a VPC that doesn’t have an internet gateway + // attached to it, the DB shard group is private. + // - If the subnets are part of a VPC that has an internet gateway attached to + // it, the DB shard group is public. + PubliclyAccessible *bool + + noSmithyDocumentSerde +} + +type CreateDBShardGroupOutput struct { + + // Specifies whether to create standby instances for the DB shard group. Valid + // values are the following: + // - 0 - Creates a single, primary DB instance for each physical shard. This is + // the default value, and the only one supported for the preview. + // - 1 - Creates a primary DB instance and a standby instance in a different + // Availability Zone (AZ) for each physical shard. + // - 2 - Creates a primary DB instance and two standby instances in different + // AZs for each physical shard. + ComputeRedundancy *int32 + + // The name of the primary DB cluster for the DB shard group. + DBClusterIdentifier *string + + // The name of the DB shard group. + DBShardGroupIdentifier *string + + // The Amazon Web Services Region-unique, immutable identifier for the DB shard + // group. + DBShardGroupResourceId *string + + // The connection endpoint for the DB shard group. + Endpoint *string + + // The maximum capacity of the DB shard group in Aurora capacity units (ACUs). + MaxACU *float64 + + // Indicates whether the DB shard group is publicly accessible. When the DB shard + // group is publicly accessible, its Domain Name System (DNS) endpoint resolves to + // the private IP address from within the DB shard group's virtual private cloud + // (VPC). It resolves to the public IP address from outside of the DB shard group's + // VPC. Access to the DB shard group is ultimately controlled by the security group + // it uses. That public access isn't permitted if the security group assigned to + // the DB shard group doesn't permit it. When the DB shard group isn't publicly + // accessible, it is an internal DB shard group with a DNS name that resolves to a + // private IP address. For more information, see CreateDBShardGroup . This setting + // is only for Aurora Limitless Database. + PubliclyAccessible *bool + + // The status of the DB shard group. + Status *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationCreateDBShardGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpCreateDBShardGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpCreateDBShardGroup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateDBShardGroup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpCreateDBShardGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDBShardGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opCreateDBShardGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "CreateDBShardGroup", + } +} diff --git a/service/rds/api_op_CreateEventSubscription.go b/service/rds/api_op_CreateEventSubscription.go index 8a6768dcfde..e435bd6f03d 100644 --- a/service/rds/api_op_CreateEventSubscription.go +++ b/service/rds/api_op_CreateEventSubscription.go @@ -50,7 +50,10 @@ func (c *Client) CreateEventSubscription(ctx context.Context, params *CreateEven type CreateEventSubscriptionInput struct { // The Amazon Resource Name (ARN) of the SNS topic created for event notification. - // The ARN is created by Amazon SNS when you create a topic and subscribe to it. + // SNS automatically creates the ARN when you create a topic and subscribe to it. + // RDS doesn't support FIFO (first in, first out) topics. 
For more information, see + // Message ordering and deduplication (FIFO topics) (https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) + // in the Amazon Simple Notification Service Developer Guide. // // This member is required. SnsTopicArn *string diff --git a/service/rds/api_op_DeleteDBShardGroup.go b/service/rds/api_op_DeleteDBShardGroup.go new file mode 100644 index 00000000000..b6ebd3ff590 --- /dev/null +++ b/service/rds/api_op_DeleteDBShardGroup.go @@ -0,0 +1,174 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package rds + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes an Aurora Limitless Database DB shard group. +func (c *Client) DeleteDBShardGroup(ctx context.Context, params *DeleteDBShardGroupInput, optFns ...func(*Options)) (*DeleteDBShardGroupOutput, error) { + if params == nil { + params = &DeleteDBShardGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteDBShardGroup", params, optFns, c.addOperationDeleteDBShardGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteDBShardGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteDBShardGroupInput struct { + + // The name of the DB shard group to delete. + // + // This member is required. + DBShardGroupIdentifier *string + + noSmithyDocumentSerde +} + +type DeleteDBShardGroupOutput struct { + + // Specifies whether to create standby instances for the DB shard group. Valid + // values are the following: + // - 0 - Creates a single, primary DB instance for each physical shard. This is + // the default value, and the only one supported for the preview. + // - 1 - Creates a primary DB instance and a standby instance in a different + // Availability Zone (AZ) for each physical shard. + // - 2 - Creates a primary DB instance and two standby instances in different + // AZs for each physical shard. + ComputeRedundancy *int32 + + // The name of the primary DB cluster for the DB shard group. + DBClusterIdentifier *string + + // The name of the DB shard group. + DBShardGroupIdentifier *string + + // The Amazon Web Services Region-unique, immutable identifier for the DB shard + // group. + DBShardGroupResourceId *string + + // The connection endpoint for the DB shard group. + Endpoint *string + + // The maximum capacity of the DB shard group in Aurora capacity units (ACUs). + MaxACU *float64 + + // Indicates whether the DB shard group is publicly accessible. When the DB shard + // group is publicly accessible, its Domain Name System (DNS) endpoint resolves to + // the private IP address from within the DB shard group's virtual private cloud + // (VPC). It resolves to the public IP address from outside of the DB shard group's + // VPC. Access to the DB shard group is ultimately controlled by the security group + // it uses. That public access isn't permitted if the security group assigned to + // the DB shard group doesn't permit it. When the DB shard group isn't publicly + // accessible, it is an internal DB shard group with a DNS name that resolves to a + // private IP address. For more information, see CreateDBShardGroup . This setting + // is only for Aurora Limitless Database. + PubliclyAccessible *bool + + // The status of the DB shard group. + Status *string + + // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteDBShardGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpDeleteDBShardGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDeleteDBShardGroup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteDBShardGroup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteDBShardGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteDBShardGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDeleteDBShardGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteDBShardGroup", + } +} diff --git a/service/rds/api_op_DescribeDBClusters.go b/service/rds/api_op_DescribeDBClusters.go index f7257d76ee0..1b65f46d613 100644 --- a/service/rds/api_op_DescribeDBClusters.go +++ b/service/rds/api_op_DescribeDBClusters.go @@ -76,7 +76,7 @@ type DescribeDBClustersInput struct { // The maximum number of records to include in the response. If more records exist // than the specified MaxRecords value, a pagination token called a marker is // included in the response so you can retrieve the remaining results. Default: 100 - // Constraints: Minimum 20, maximum 100. 
+ // Constraints: Minimum 20, maximum 100 MaxRecords *int32 noSmithyDocumentSerde @@ -190,7 +190,7 @@ type DescribeDBClustersPaginatorOptions struct { // The maximum number of records to include in the response. If more records exist // than the specified MaxRecords value, a pagination token called a marker is // included in the response so you can retrieve the remaining results. Default: 100 - // Constraints: Minimum 20, maximum 100. + // Constraints: Minimum 20, maximum 100 Limit int32 // Set to true if pagination should stop if the service returns a pagination token diff --git a/service/rds/api_op_DescribeDBShardGroups.go b/service/rds/api_op_DescribeDBShardGroups.go new file mode 100644 index 00000000000..baa58889a4d --- /dev/null +++ b/service/rds/api_op_DescribeDBShardGroups.go @@ -0,0 +1,156 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package rds + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/service/rds/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Describes existing Aurora Limitless Database DB shard groups. +func (c *Client) DescribeDBShardGroups(ctx context.Context, params *DescribeDBShardGroupsInput, optFns ...func(*Options)) (*DescribeDBShardGroupsOutput, error) { + if params == nil { + params = &DescribeDBShardGroupsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DescribeDBShardGroups", params, optFns, c.addOperationDescribeDBShardGroupsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DescribeDBShardGroupsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DescribeDBShardGroupsInput struct { + + // The user-supplied DB shard group identifier or the Amazon Resource Name (ARN) + // of the DB shard group. If this parameter is specified, information for only the + // specific DB shard group is returned. This parameter isn't case-sensitive. + // Constraints: + // - If supplied, must match an existing DB shard group identifier. + DBShardGroupIdentifier *string + + // A filter that specifies one or more DB shard groups to describe. + Filters []types.Filter + + // An optional pagination token provided by a previous DescribeDBShardGroups + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords . + Marker *string + + // The maximum number of records to include in the response. If more records exist + // than the specified MaxRecords value, a pagination token called a marker is + // included in the response so you can retrieve the remaining results. Default: 100 + // Constraints: Minimum 20, maximum 100 + MaxRecords *int32 + + noSmithyDocumentSerde +} + +type DescribeDBShardGroupsOutput struct { + + // Contains a list of DB shard groups for the user. + DBShardGroups []types.DBShardGroup + + // A pagination token that can be used in a later DescribeDBClusters request. + Marker *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDescribeDBShardGroupsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpDescribeDBShardGroups{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpDescribeDBShardGroups{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeDBShardGroups"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpDescribeDBShardGroupsValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeDBShardGroups(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opDescribeDBShardGroups(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DescribeDBShardGroups", + } +} diff --git a/service/rds/api_op_FailoverDBCluster.go b/service/rds/api_op_FailoverDBCluster.go index e7ac7b82cd5..575f3ab39a5 100644 --- a/service/rds/api_op_FailoverDBCluster.go +++ b/service/rds/api_op_FailoverDBCluster.go @@ -15,16 +15,18 @@ import ( // Forces a failover for a DB cluster. For an Aurora DB cluster, failover for a DB // cluster promotes one of the Aurora Replicas (read-only instances) in the DB // cluster to be the primary DB instance (the cluster writer). For a Multi-AZ DB -// cluster, failover for a DB cluster promotes one of the readable standby DB -// instances (read-only instances) in the DB cluster to be the primary DB instance -// (the cluster writer). 
An Amazon Aurora DB cluster automatically fails over to an -// Aurora Replica, if one exists, when the primary DB instance fails. A Multi-AZ DB -// cluster automatically fails over to a readable standby DB instance when the -// primary DB instance fails. To simulate a failure of a primary instance for -// testing, you can force a failover. Because each instance in a DB cluster has its -// own endpoint address, make sure to clean up and re-establish any existing -// connections that use those endpoint addresses when the failover is complete. For -// more information on Amazon Aurora DB clusters, see What is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// cluster, after RDS terminates the primary DB instance, the internal monitoring +// system detects that the primary DB instance is unhealthy and promotes a readable +// standby (read-only instances) in the DB cluster to be the primary DB instance +// (the cluster writer). Failover times are typically less than 35 seconds. An +// Amazon Aurora DB cluster automatically fails over to an Aurora Replica, if one +// exists, when the primary DB instance fails. A Multi-AZ DB cluster automatically +// fails over to a readable standby DB instance when the primary DB instance fails. +// To simulate a failure of a primary instance for testing, you can force a +// failover. Because each instance in a DB cluster has its own endpoint address, +// make sure to clean up and re-establish any existing connections that use those +// endpoint addresses when the failover is complete. For more information on Amazon +// Aurora DB clusters, see What is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. For more information on Multi-AZ DB clusters, // see Multi-AZ DB cluster deployments (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) // in the Amazon RDS User Guide. diff --git a/service/rds/api_op_ModifyDBCluster.go b/service/rds/api_op_ModifyDBCluster.go index 41c8331a03f..5219240a46f 100644 --- a/service/rds/api_op_ModifyDBCluster.go +++ b/service/rds/api_op_ModifyDBCluster.go @@ -185,6 +185,11 @@ type ModifyDBClusterInput struct { // in the Amazon Aurora User Guide. Valid for Cluster Type: Aurora DB clusters only EnableIAMDatabaseAuthentication *bool + // Specifies whether to enable Aurora Limitless Database. You must enable Aurora + // Limitless Database to create a DB shard group. Valid for: Aurora DB clusters + // only + EnableLimitlessDatabase *bool + // Specifies whether read replicas can forward write operations to the writer DB // instance in the DB cluster. By default, write operations aren't allowed on // reader DB instances. Valid for: Aurora DB clusters only diff --git a/service/rds/api_op_ModifyDBShardGroup.go b/service/rds/api_op_ModifyDBShardGroup.go new file mode 100644 index 00000000000..d0c44e68997 --- /dev/null +++ b/service/rds/api_op_ModifyDBShardGroup.go @@ -0,0 +1,179 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package rds + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Modifies the settings of an Aurora Limitless Database DB shard group. You can +// change one or more settings by specifying these parameters and the new values in +// the request. 
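// A minimal caller-side sketch of the new Aurora Limitless Database DB shard group
// operations introduced in this change, assuming default credentials are configured;
// the cluster identifier, shard group identifier, and ACU values are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rds"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := rds.NewFromConfig(cfg)

	// Create a DB shard group on a cluster that has Aurora Limitless Database enabled.
	_, err = client.CreateDBShardGroup(context.TODO(), &rds.CreateDBShardGroupInput{
		DBClusterIdentifier:    aws.String("my-limitless-cluster"), // placeholder cluster name
		DBShardGroupIdentifier: aws.String("my-shard-group"),       // placeholder shard group name
		MaxACU:                 aws.Float64(768),                   // placeholder capacity
	})
	if err != nil {
		log.Fatal(err)
	}

	// Later, raise the shard group's maximum capacity with ModifyDBShardGroup.
	_, err = client.ModifyDBShardGroup(context.TODO(), &rds.ModifyDBShardGroupInput{
		DBShardGroupIdentifier: aws.String("my-shard-group"),
		MaxACU:                 aws.Float64(1024),
	})
	if err != nil {
		log.Fatal(err)
	}
}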
+func (c *Client) ModifyDBShardGroup(ctx context.Context, params *ModifyDBShardGroupInput, optFns ...func(*Options)) (*ModifyDBShardGroupOutput, error) { + if params == nil { + params = &ModifyDBShardGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ModifyDBShardGroup", params, optFns, c.addOperationModifyDBShardGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ModifyDBShardGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ModifyDBShardGroupInput struct { + + // The name of the DB shard group to modify. + // + // This member is required. + DBShardGroupIdentifier *string + + // The maximum capacity of the DB shard group in Aurora capacity units (ACUs). + MaxACU *float64 + + noSmithyDocumentSerde +} + +type ModifyDBShardGroupOutput struct { + + // Specifies whether to create standby instances for the DB shard group. Valid + // values are the following: + // - 0 - Creates a single, primary DB instance for each physical shard. This is + // the default value, and the only one supported for the preview. + // - 1 - Creates a primary DB instance and a standby instance in a different + // Availability Zone (AZ) for each physical shard. + // - 2 - Creates a primary DB instance and two standby instances in different + // AZs for each physical shard. + ComputeRedundancy *int32 + + // The name of the primary DB cluster for the DB shard group. + DBClusterIdentifier *string + + // The name of the DB shard group. + DBShardGroupIdentifier *string + + // The Amazon Web Services Region-unique, immutable identifier for the DB shard + // group. + DBShardGroupResourceId *string + + // The connection endpoint for the DB shard group. + Endpoint *string + + // The maximum capacity of the DB shard group in Aurora capacity units (ACUs). + MaxACU *float64 + + // Indicates whether the DB shard group is publicly accessible. When the DB shard + // group is publicly accessible, its Domain Name System (DNS) endpoint resolves to + // the private IP address from within the DB shard group's virtual private cloud + // (VPC). It resolves to the public IP address from outside of the DB shard group's + // VPC. Access to the DB shard group is ultimately controlled by the security group + // it uses. That public access isn't permitted if the security group assigned to + // the DB shard group doesn't permit it. When the DB shard group isn't publicly + // accessible, it is an internal DB shard group with a DNS name that resolves to a + // private IP address. For more information, see CreateDBShardGroup . This setting + // is only for Aurora Limitless Database. + PubliclyAccessible *bool + + // The status of the DB shard group. + Status *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationModifyDBShardGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpModifyDBShardGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpModifyDBShardGroup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ModifyDBShardGroup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpModifyDBShardGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opModifyDBShardGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opModifyDBShardGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ModifyDBShardGroup", + } +} diff --git a/service/rds/api_op_RebootDBShardGroup.go b/service/rds/api_op_RebootDBShardGroup.go new file mode 100644 index 00000000000..08716449e7e --- /dev/null +++ b/service/rds/api_op_RebootDBShardGroup.go @@ -0,0 +1,177 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package rds + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// You might need to reboot your DB shard group, usually for maintenance reasons. +// For example, if you make certain modifications, reboot the DB shard group for +// the changes to take effect. 
This operation applies only to Aurora Limitless +// Database DBb shard groups. +func (c *Client) RebootDBShardGroup(ctx context.Context, params *RebootDBShardGroupInput, optFns ...func(*Options)) (*RebootDBShardGroupOutput, error) { + if params == nil { + params = &RebootDBShardGroupInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "RebootDBShardGroup", params, optFns, c.addOperationRebootDBShardGroupMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*RebootDBShardGroupOutput) + out.ResultMetadata = metadata + return out, nil +} + +type RebootDBShardGroupInput struct { + + // The name of the DB shard group to reboot. + // + // This member is required. + DBShardGroupIdentifier *string + + noSmithyDocumentSerde +} + +type RebootDBShardGroupOutput struct { + + // Specifies whether to create standby instances for the DB shard group. Valid + // values are the following: + // - 0 - Creates a single, primary DB instance for each physical shard. This is + // the default value, and the only one supported for the preview. + // - 1 - Creates a primary DB instance and a standby instance in a different + // Availability Zone (AZ) for each physical shard. + // - 2 - Creates a primary DB instance and two standby instances in different + // AZs for each physical shard. + ComputeRedundancy *int32 + + // The name of the primary DB cluster for the DB shard group. + DBClusterIdentifier *string + + // The name of the DB shard group. + DBShardGroupIdentifier *string + + // The Amazon Web Services Region-unique, immutable identifier for the DB shard + // group. + DBShardGroupResourceId *string + + // The connection endpoint for the DB shard group. + Endpoint *string + + // The maximum capacity of the DB shard group in Aurora capacity units (ACUs). + MaxACU *float64 + + // Indicates whether the DB shard group is publicly accessible. When the DB shard + // group is publicly accessible, its Domain Name System (DNS) endpoint resolves to + // the private IP address from within the DB shard group's virtual private cloud + // (VPC). It resolves to the public IP address from outside of the DB shard group's + // VPC. Access to the DB shard group is ultimately controlled by the security group + // it uses. That public access isn't permitted if the security group assigned to + // the DB shard group doesn't permit it. When the DB shard group isn't publicly + // accessible, it is an internal DB shard group with a DNS name that resolves to a + // private IP address. For more information, see CreateDBShardGroup . This setting + // is only for Aurora Limitless Database. + PubliclyAccessible *bool + + // The status of the DB shard group. + Status *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationRebootDBShardGroupMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsquery_serializeOpRebootDBShardGroup{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsquery_deserializeOpRebootDBShardGroup{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "RebootDBShardGroup"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpRebootDBShardGroupValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRebootDBShardGroup(options.Region), middleware.Before); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opRebootDBShardGroup(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "RebootDBShardGroup", + } +} diff --git a/service/rds/deserializers.go b/service/rds/deserializers.go index 459d0860bcb..849ff7913ff 100644 --- a/service/rds/deserializers.go +++ b/service/rds/deserializers.go @@ -3020,6 +3020,132 @@ func awsAwsquery_deserializeOpErrorCreateDBSecurityGroup(response *smithyhttp.Re } } +type awsAwsquery_deserializeOpCreateDBShardGroup struct { +} + +func (*awsAwsquery_deserializeOpCreateDBShardGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpCreateDBShardGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + 
return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorCreateDBShardGroup(response, &metadata) + } + output := &CreateDBShardGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("CreateDBShardGroupResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentCreateDBShardGroupOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorCreateDBShardGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("DBClusterNotFoundFault", errorCode): + return awsAwsquery_deserializeErrorDBClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("DBShardGroupAlreadyExists", errorCode): + return awsAwsquery_deserializeErrorDBShardGroupAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("InvalidDBClusterStateFault", errorCode): + return awsAwsquery_deserializeErrorInvalidDBClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidMaxAcu", errorCode): + return awsAwsquery_deserializeErrorInvalidMaxAcuFault(response, errorBody) + + case strings.EqualFold("InvalidVPCNetworkStateFault", errorCode): + return awsAwsquery_deserializeErrorInvalidVPCNetworkStateFault(response, errorBody) + + case strings.EqualFold("MaxDBShardGroupLimitReached", errorCode): + return awsAwsquery_deserializeErrorMaxDBShardGroupLimitReached(response, errorBody) + + case 
strings.EqualFold("UnsupportedDBEngineVersion", errorCode): + return awsAwsquery_deserializeErrorUnsupportedDBEngineVersionFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsquery_deserializeOpCreateDBSnapshot struct { } @@ -5216,6 +5342,120 @@ func awsAwsquery_deserializeOpErrorDeleteDBSecurityGroup(response *smithyhttp.Re } } +type awsAwsquery_deserializeOpDeleteDBShardGroup struct { +} + +func (*awsAwsquery_deserializeOpDeleteDBShardGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpDeleteDBShardGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorDeleteDBShardGroup(response, &metadata) + } + output := &DeleteDBShardGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("DeleteDBShardGroupResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentDeleteDBShardGroupOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorDeleteDBShardGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + 
case strings.EqualFold("DBShardGroupNotFound", errorCode): + return awsAwsquery_deserializeErrorDBShardGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidDBClusterStateFault", errorCode): + return awsAwsquery_deserializeErrorInvalidDBClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidDBShardGroupState", errorCode): + return awsAwsquery_deserializeErrorInvalidDBShardGroupStateFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsquery_deserializeOpDeleteDBSnapshot struct { } @@ -8545,14 +8785,14 @@ func awsAwsquery_deserializeOpErrorDescribeDBSecurityGroups(response *smithyhttp } } -type awsAwsquery_deserializeOpDescribeDBSnapshotAttributes struct { +type awsAwsquery_deserializeOpDescribeDBShardGroups struct { } -func (*awsAwsquery_deserializeOpDescribeDBSnapshotAttributes) ID() string { +func (*awsAwsquery_deserializeOpDescribeDBShardGroups) ID() string { return "OperationDeserializer" } -func (m *awsAwsquery_deserializeOpDescribeDBSnapshotAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsquery_deserializeOpDescribeDBShardGroups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8566,9 +8806,9 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshotAttributes) HandleDeserializ } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorDescribeDBSnapshotAttributes(response, &metadata) + return out, metadata, awsAwsquery_deserializeOpErrorDescribeDBShardGroups(response, &metadata) } - output := &DescribeDBSnapshotAttributesOutput{} + output := &DescribeDBShardGroupsOutput{} out.Result = output var buff [1024]byte @@ -8589,7 +8829,7 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshotAttributes) HandleDeserializ } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("DescribeDBSnapshotAttributesResult") + t, err = decoder.GetElement("DescribeDBShardGroupsResult") if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8601,7 +8841,7 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshotAttributes) HandleDeserializ } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentDescribeDBSnapshotAttributesOutput(&output, decoder) + err = awsAwsquery_deserializeOpDocumentDescribeDBShardGroupsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8615,7 +8855,7 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshotAttributes) HandleDeserializ return out, metadata, err } -func awsAwsquery_deserializeOpErrorDescribeDBSnapshotAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsquery_deserializeOpErrorDescribeDBShardGroups(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8640,8 +8880,11 @@ func awsAwsquery_deserializeOpErrorDescribeDBSnapshotAttributes(response *smithy } errorBody.Seek(0, io.SeekStart) switch { - 
case strings.EqualFold("DBSnapshotNotFound", errorCode): - return awsAwsquery_deserializeErrorDBSnapshotNotFoundFault(response, errorBody) + case strings.EqualFold("DBClusterNotFoundFault", errorCode): + return awsAwsquery_deserializeErrorDBClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("DBShardGroupNotFound", errorCode): + return awsAwsquery_deserializeErrorDBShardGroupNotFoundFault(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -8653,14 +8896,14 @@ func awsAwsquery_deserializeOpErrorDescribeDBSnapshotAttributes(response *smithy } } -type awsAwsquery_deserializeOpDescribeDBSnapshots struct { +type awsAwsquery_deserializeOpDescribeDBSnapshotAttributes struct { } -func (*awsAwsquery_deserializeOpDescribeDBSnapshots) ID() string { +func (*awsAwsquery_deserializeOpDescribeDBSnapshotAttributes) ID() string { return "OperationDeserializer" } -func (m *awsAwsquery_deserializeOpDescribeDBSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsquery_deserializeOpDescribeDBSnapshotAttributes) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8674,9 +8917,9 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshots) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorDescribeDBSnapshots(response, &metadata) + return out, metadata, awsAwsquery_deserializeOpErrorDescribeDBSnapshotAttributes(response, &metadata) } - output := &DescribeDBSnapshotsOutput{} + output := &DescribeDBSnapshotAttributesOutput{} out.Result = output var buff [1024]byte @@ -8697,7 +8940,7 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshots) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("DescribeDBSnapshotsResult") + t, err = decoder.GetElement("DescribeDBSnapshotAttributesResult") if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8709,7 +8952,7 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshots) HandleDeserialize(ctx con } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentDescribeDBSnapshotsOutput(&output, decoder) + err = awsAwsquery_deserializeOpDocumentDescribeDBSnapshotAttributesOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8723,7 +8966,7 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshots) HandleDeserialize(ctx con return out, metadata, err } -func awsAwsquery_deserializeOpErrorDescribeDBSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsquery_deserializeOpErrorDescribeDBSnapshotAttributes(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8761,14 +9004,14 @@ func awsAwsquery_deserializeOpErrorDescribeDBSnapshots(response *smithyhttp.Resp } } -type awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases struct { +type awsAwsquery_deserializeOpDescribeDBSnapshots struct { } -func (*awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases) ID() string { 
+func (*awsAwsquery_deserializeOpDescribeDBSnapshots) ID() string { return "OperationDeserializer" } -func (m *awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsquery_deserializeOpDescribeDBSnapshots) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -8782,9 +9025,9 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases) HandleDeser } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorDescribeDBSnapshotTenantDatabases(response, &metadata) + return out, metadata, awsAwsquery_deserializeOpErrorDescribeDBSnapshots(response, &metadata) } - output := &DescribeDBSnapshotTenantDatabasesOutput{} + output := &DescribeDBSnapshotsOutput{} out.Result = output var buff [1024]byte @@ -8805,7 +9048,7 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases) HandleDeser } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("DescribeDBSnapshotTenantDatabasesResult") + t, err = decoder.GetElement("DescribeDBSnapshotsResult") if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8817,7 +9060,7 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases) HandleDeser } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentDescribeDBSnapshotTenantDatabasesOutput(&output, decoder) + err = awsAwsquery_deserializeOpDocumentDescribeDBSnapshotsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -8831,7 +9074,115 @@ func (m *awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases) HandleDeser return out, metadata, err } -func awsAwsquery_deserializeOpErrorDescribeDBSnapshotTenantDatabases(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsquery_deserializeOpErrorDescribeDBSnapshots(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("DBSnapshotNotFound", errorCode): + return awsAwsquery_deserializeErrorDBSnapshotNotFoundFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases struct { +} + +func (*awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases) ID() string { + return "OperationDeserializer" +} + +func (m 
*awsAwsquery_deserializeOpDescribeDBSnapshotTenantDatabases) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorDescribeDBSnapshotTenantDatabases(response, &metadata) + } + output := &DescribeDBSnapshotTenantDatabasesOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("DescribeDBSnapshotTenantDatabasesResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentDescribeDBSnapshotTenantDatabasesOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorDescribeDBSnapshotTenantDatabases(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -13162,6 +13513,123 @@ func awsAwsquery_deserializeOpErrorModifyDBRecommendation(response *smithyhttp.R } } +type awsAwsquery_deserializeOpModifyDBShardGroup struct { +} + +func (*awsAwsquery_deserializeOpModifyDBShardGroup) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpModifyDBShardGroup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorModifyDBShardGroup(response, &metadata) + } + output := &ModifyDBShardGroupOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := 
smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("ModifyDBShardGroupResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentModifyDBShardGroupOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorModifyDBShardGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("DBShardGroupAlreadyExists", errorCode): + return awsAwsquery_deserializeErrorDBShardGroupAlreadyExistsFault(response, errorBody) + + case strings.EqualFold("DBShardGroupNotFound", errorCode): + return awsAwsquery_deserializeErrorDBShardGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("InvalidDBClusterStateFault", errorCode): + return awsAwsquery_deserializeErrorInvalidDBClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidMaxAcu", errorCode): + return awsAwsquery_deserializeErrorInvalidMaxAcuFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsquery_deserializeOpModifyDBSnapshot struct { } @@ -14533,14 +15001,14 @@ func awsAwsquery_deserializeOpErrorRebootDBInstance(response *smithyhttp.Respons } } -type awsAwsquery_deserializeOpRegisterDBProxyTargets struct { +type awsAwsquery_deserializeOpRebootDBShardGroup struct { } -func (*awsAwsquery_deserializeOpRegisterDBProxyTargets) ID() string { +func (*awsAwsquery_deserializeOpRebootDBShardGroup) ID() string { return "OperationDeserializer" } -func (m *awsAwsquery_deserializeOpRegisterDBProxyTargets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsquery_deserializeOpRebootDBShardGroup) 
HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14554,9 +15022,9 @@ func (m *awsAwsquery_deserializeOpRegisterDBProxyTargets) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorRegisterDBProxyTargets(response, &metadata) + return out, metadata, awsAwsquery_deserializeOpErrorRebootDBShardGroup(response, &metadata) } - output := &RegisterDBProxyTargetsOutput{} + output := &RebootDBShardGroupOutput{} out.Result = output var buff [1024]byte @@ -14577,7 +15045,7 @@ func (m *awsAwsquery_deserializeOpRegisterDBProxyTargets) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("RegisterDBProxyTargetsResult") + t, err = decoder.GetElement("RebootDBShardGroupResult") if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14589,7 +15057,7 @@ func (m *awsAwsquery_deserializeOpRegisterDBProxyTargets) HandleDeserialize(ctx } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentRegisterDBProxyTargetsOutput(&output, decoder) + err = awsAwsquery_deserializeOpDocumentRebootDBShardGroupOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14603,7 +15071,7 @@ func (m *awsAwsquery_deserializeOpRegisterDBProxyTargets) HandleDeserialize(ctx return out, metadata, err } -func awsAwsquery_deserializeOpErrorRegisterDBProxyTargets(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsquery_deserializeOpErrorRebootDBShardGroup(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -14628,32 +15096,11 @@ func awsAwsquery_deserializeOpErrorRegisterDBProxyTargets(response *smithyhttp.R } errorBody.Seek(0, io.SeekStart) switch { - case strings.EqualFold("DBClusterNotFoundFault", errorCode): - return awsAwsquery_deserializeErrorDBClusterNotFoundFault(response, errorBody) - - case strings.EqualFold("DBInstanceNotFound", errorCode): - return awsAwsquery_deserializeErrorDBInstanceNotFoundFault(response, errorBody) + case strings.EqualFold("DBShardGroupNotFound", errorCode): + return awsAwsquery_deserializeErrorDBShardGroupNotFoundFault(response, errorBody) - case strings.EqualFold("DBProxyNotFoundFault", errorCode): - return awsAwsquery_deserializeErrorDBProxyNotFoundFault(response, errorBody) - - case strings.EqualFold("DBProxyTargetAlreadyRegisteredFault", errorCode): - return awsAwsquery_deserializeErrorDBProxyTargetAlreadyRegisteredFault(response, errorBody) - - case strings.EqualFold("DBProxyTargetGroupNotFoundFault", errorCode): - return awsAwsquery_deserializeErrorDBProxyTargetGroupNotFoundFault(response, errorBody) - - case strings.EqualFold("InsufficientAvailableIPsInSubnetFault", errorCode): - return awsAwsquery_deserializeErrorInsufficientAvailableIPsInSubnetFault(response, errorBody) - - case strings.EqualFold("InvalidDBClusterStateFault", errorCode): - return awsAwsquery_deserializeErrorInvalidDBClusterStateFault(response, errorBody) - - case strings.EqualFold("InvalidDBInstanceState", errorCode): - return 
awsAwsquery_deserializeErrorInvalidDBInstanceStateFault(response, errorBody) - - case strings.EqualFold("InvalidDBProxyStateFault", errorCode): - return awsAwsquery_deserializeErrorInvalidDBProxyStateFault(response, errorBody) + case strings.EqualFold("InvalidDBShardGroupState", errorCode): + return awsAwsquery_deserializeErrorInvalidDBShardGroupStateFault(response, errorBody) default: genericError := &smithy.GenericAPIError{ @@ -14665,14 +15112,14 @@ func awsAwsquery_deserializeOpErrorRegisterDBProxyTargets(response *smithyhttp.R } } -type awsAwsquery_deserializeOpRemoveFromGlobalCluster struct { +type awsAwsquery_deserializeOpRegisterDBProxyTargets struct { } -func (*awsAwsquery_deserializeOpRemoveFromGlobalCluster) ID() string { +func (*awsAwsquery_deserializeOpRegisterDBProxyTargets) ID() string { return "OperationDeserializer" } -func (m *awsAwsquery_deserializeOpRemoveFromGlobalCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsAwsquery_deserializeOpRegisterDBProxyTargets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -14686,9 +15133,9 @@ func (m *awsAwsquery_deserializeOpRemoveFromGlobalCluster) HandleDeserialize(ctx } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsAwsquery_deserializeOpErrorRemoveFromGlobalCluster(response, &metadata) + return out, metadata, awsAwsquery_deserializeOpErrorRegisterDBProxyTargets(response, &metadata) } - output := &RemoveFromGlobalClusterOutput{} + output := &RegisterDBProxyTargetsOutput{} out.Result = output var buff [1024]byte @@ -14709,7 +15156,7 @@ func (m *awsAwsquery_deserializeOpRemoveFromGlobalCluster) HandleDeserialize(ctx } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("RemoveFromGlobalClusterResult") + t, err = decoder.GetElement("RegisterDBProxyTargetsResult") if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14721,7 +15168,7 @@ func (m *awsAwsquery_deserializeOpRemoveFromGlobalCluster) HandleDeserialize(ctx } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeOpDocumentRemoveFromGlobalClusterOutput(&output, decoder) + err = awsAwsquery_deserializeOpDocumentRegisterDBProxyTargetsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -14735,7 +15182,139 @@ func (m *awsAwsquery_deserializeOpRemoveFromGlobalCluster) HandleDeserialize(ctx return out, metadata, err } -func awsAwsquery_deserializeOpErrorRemoveFromGlobalCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsAwsquery_deserializeOpErrorRegisterDBProxyTargets(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false) + if err != nil { + return err + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) 
!= 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("DBClusterNotFoundFault", errorCode): + return awsAwsquery_deserializeErrorDBClusterNotFoundFault(response, errorBody) + + case strings.EqualFold("DBInstanceNotFound", errorCode): + return awsAwsquery_deserializeErrorDBInstanceNotFoundFault(response, errorBody) + + case strings.EqualFold("DBProxyNotFoundFault", errorCode): + return awsAwsquery_deserializeErrorDBProxyNotFoundFault(response, errorBody) + + case strings.EqualFold("DBProxyTargetAlreadyRegisteredFault", errorCode): + return awsAwsquery_deserializeErrorDBProxyTargetAlreadyRegisteredFault(response, errorBody) + + case strings.EqualFold("DBProxyTargetGroupNotFoundFault", errorCode): + return awsAwsquery_deserializeErrorDBProxyTargetGroupNotFoundFault(response, errorBody) + + case strings.EqualFold("InsufficientAvailableIPsInSubnetFault", errorCode): + return awsAwsquery_deserializeErrorInsufficientAvailableIPsInSubnetFault(response, errorBody) + + case strings.EqualFold("InvalidDBClusterStateFault", errorCode): + return awsAwsquery_deserializeErrorInvalidDBClusterStateFault(response, errorBody) + + case strings.EqualFold("InvalidDBInstanceState", errorCode): + return awsAwsquery_deserializeErrorInvalidDBInstanceStateFault(response, errorBody) + + case strings.EqualFold("InvalidDBProxyStateFault", errorCode): + return awsAwsquery_deserializeErrorInvalidDBProxyStateFault(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +type awsAwsquery_deserializeOpRemoveFromGlobalCluster struct { +} + +func (*awsAwsquery_deserializeOpRemoveFromGlobalCluster) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsquery_deserializeOpRemoveFromGlobalCluster) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsquery_deserializeOpErrorRemoveFromGlobalCluster(response, &metadata) + } + output := &RemoveFromGlobalClusterOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("RemoveFromGlobalClusterResult") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + decoder = 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeOpDocumentRemoveFromGlobalClusterOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsquery_deserializeOpErrorRemoveFromGlobalCluster(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -20143,6 +20722,94 @@ func awsAwsquery_deserializeErrorDBSecurityGroupQuotaExceededFault(response *smi return output } +func awsAwsquery_deserializeErrorDBShardGroupAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.DBShardGroupAlreadyExistsFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentDBShardGroupAlreadyExistsFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorDBShardGroupNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.DBShardGroupNotFoundFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentDBShardGroupNotFoundFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + func 
awsAwsquery_deserializeErrorDBSnapshotAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.DBSnapshotAlreadyExistsFault{} var buff [1024]byte @@ -21991,6 +22658,50 @@ func awsAwsquery_deserializeErrorInvalidDBSecurityGroupStateFault(response *smit return output } +func awsAwsquery_deserializeErrorInvalidDBShardGroupStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidDBShardGroupStateFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentInvalidDBShardGroupStateFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + func awsAwsquery_deserializeErrorInvalidDBSnapshotStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { output := &types.InvalidDBSnapshotStateFault{} var buff [1024]byte @@ -22431,8 +23142,8 @@ func awsAwsquery_deserializeErrorInvalidIntegrationStateFault(response *smithyht return output } -func awsAwsquery_deserializeErrorInvalidOptionGroupStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidOptionGroupStateFault{} +func awsAwsquery_deserializeErrorInvalidMaxAcuFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidMaxAcuFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22462,183 +23173,7 @@ func awsAwsquery_deserializeErrorInvalidOptionGroupStateFault(response *smithyht } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidOptionGroupStateFault(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorInvalidResourceStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidResourceStateFault{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder 
:= smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidResourceStateFault(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorInvalidRestoreFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidRestoreFault{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidRestoreFault(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorInvalidS3BucketFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidS3BucketFault{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidS3BucketFault(&output, decoder) - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - return output -} - -func awsAwsquery_deserializeErrorInvalidSubnet(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidSubnet{} - var buff [1024]byte - ringBuffer := smithyio.NewRingBuffer(buff[:]) - body := 
io.TeeReader(errorBody, ringBuffer) - rootDecoder := xml.NewDecoder(body) - t, err := smithyxml.FetchRootElement(rootDecoder) - if err == io.EOF { - return output - } - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - t, err = decoder.GetElement("Error") - if err != nil { - var snapshot bytes.Buffer - io.Copy(&snapshot, ringBuffer) - return &smithy.DeserializationError{ - Err: fmt.Errorf("failed to decode response body, %w", err), - Snapshot: snapshot.Bytes(), - } - } - - decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidSubnet(&output, decoder) + err = awsAwsquery_deserializeDocumentInvalidMaxAcuFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22651,8 +23186,8 @@ func awsAwsquery_deserializeErrorInvalidSubnet(response *smithyhttp.Response, er return output } -func awsAwsquery_deserializeErrorInvalidVPCNetworkStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.InvalidVPCNetworkStateFault{} +func awsAwsquery_deserializeErrorInvalidOptionGroupStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidOptionGroupStateFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22682,7 +23217,7 @@ func awsAwsquery_deserializeErrorInvalidVPCNetworkStateFault(response *smithyhtt } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentInvalidVPCNetworkStateFault(&output, decoder) + err = awsAwsquery_deserializeDocumentInvalidOptionGroupStateFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22695,8 +23230,8 @@ func awsAwsquery_deserializeErrorInvalidVPCNetworkStateFault(response *smithyhtt return output } -func awsAwsquery_deserializeErrorKMSKeyNotAccessibleFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.KMSKeyNotAccessibleFault{} +func awsAwsquery_deserializeErrorInvalidResourceStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidResourceStateFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22726,7 +23261,7 @@ func awsAwsquery_deserializeErrorKMSKeyNotAccessibleFault(response *smithyhttp.R } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentKMSKeyNotAccessibleFault(&output, decoder) + err = awsAwsquery_deserializeDocumentInvalidResourceStateFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22739,8 +23274,8 @@ func awsAwsquery_deserializeErrorKMSKeyNotAccessibleFault(response *smithyhttp.R return output } -func awsAwsquery_deserializeErrorNetworkTypeNotSupported(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.NetworkTypeNotSupported{} +func awsAwsquery_deserializeErrorInvalidRestoreFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidRestoreFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22770,7 +23305,7 @@ func 
awsAwsquery_deserializeErrorNetworkTypeNotSupported(response *smithyhttp.Re } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentNetworkTypeNotSupported(&output, decoder) + err = awsAwsquery_deserializeDocumentInvalidRestoreFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22783,8 +23318,8 @@ func awsAwsquery_deserializeErrorNetworkTypeNotSupported(response *smithyhttp.Re return output } -func awsAwsquery_deserializeErrorOptionGroupAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.OptionGroupAlreadyExistsFault{} +func awsAwsquery_deserializeErrorInvalidS3BucketFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidS3BucketFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22814,7 +23349,7 @@ func awsAwsquery_deserializeErrorOptionGroupAlreadyExistsFault(response *smithyh } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentOptionGroupAlreadyExistsFault(&output, decoder) + err = awsAwsquery_deserializeDocumentInvalidS3BucketFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22827,8 +23362,8 @@ func awsAwsquery_deserializeErrorOptionGroupAlreadyExistsFault(response *smithyh return output } -func awsAwsquery_deserializeErrorOptionGroupNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.OptionGroupNotFoundFault{} +func awsAwsquery_deserializeErrorInvalidSubnet(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidSubnet{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22858,7 +23393,7 @@ func awsAwsquery_deserializeErrorOptionGroupNotFoundFault(response *smithyhttp.R } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentOptionGroupNotFoundFault(&output, decoder) + err = awsAwsquery_deserializeDocumentInvalidSubnet(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22871,8 +23406,8 @@ func awsAwsquery_deserializeErrorOptionGroupNotFoundFault(response *smithyhttp.R return output } -func awsAwsquery_deserializeErrorOptionGroupQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.OptionGroupQuotaExceededFault{} +func awsAwsquery_deserializeErrorInvalidVPCNetworkStateFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.InvalidVPCNetworkStateFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22902,7 +23437,7 @@ func awsAwsquery_deserializeErrorOptionGroupQuotaExceededFault(response *smithyh } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentOptionGroupQuotaExceededFault(&output, decoder) + err = awsAwsquery_deserializeDocumentInvalidVPCNetworkStateFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22915,8 +23450,8 @@ func awsAwsquery_deserializeErrorOptionGroupQuotaExceededFault(response *smithyh return output } -func awsAwsquery_deserializeErrorPointInTimeRestoreNotEnabledFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := 
&types.PointInTimeRestoreNotEnabledFault{} +func awsAwsquery_deserializeErrorKMSKeyNotAccessibleFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.KMSKeyNotAccessibleFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22946,7 +23481,7 @@ func awsAwsquery_deserializeErrorPointInTimeRestoreNotEnabledFault(response *smi } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentPointInTimeRestoreNotEnabledFault(&output, decoder) + err = awsAwsquery_deserializeDocumentKMSKeyNotAccessibleFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -22959,8 +23494,8 @@ func awsAwsquery_deserializeErrorPointInTimeRestoreNotEnabledFault(response *smi return output } -func awsAwsquery_deserializeErrorProvisionedIopsNotAvailableInAZFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ProvisionedIopsNotAvailableInAZFault{} +func awsAwsquery_deserializeErrorMaxDBShardGroupLimitReached(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.MaxDBShardGroupLimitReached{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -22990,7 +23525,7 @@ func awsAwsquery_deserializeErrorProvisionedIopsNotAvailableInAZFault(response * } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentProvisionedIopsNotAvailableInAZFault(&output, decoder) + err = awsAwsquery_deserializeDocumentMaxDBShardGroupLimitReached(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23003,8 +23538,8 @@ func awsAwsquery_deserializeErrorProvisionedIopsNotAvailableInAZFault(response * return output } -func awsAwsquery_deserializeErrorReservedDBInstanceAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ReservedDBInstanceAlreadyExistsFault{} +func awsAwsquery_deserializeErrorNetworkTypeNotSupported(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.NetworkTypeNotSupported{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23034,7 +23569,7 @@ func awsAwsquery_deserializeErrorReservedDBInstanceAlreadyExistsFault(response * } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentReservedDBInstanceAlreadyExistsFault(&output, decoder) + err = awsAwsquery_deserializeDocumentNetworkTypeNotSupported(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23047,8 +23582,8 @@ func awsAwsquery_deserializeErrorReservedDBInstanceAlreadyExistsFault(response * return output } -func awsAwsquery_deserializeErrorReservedDBInstanceNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ReservedDBInstanceNotFoundFault{} +func awsAwsquery_deserializeErrorOptionGroupAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.OptionGroupAlreadyExistsFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23078,7 +23613,7 @@ func awsAwsquery_deserializeErrorReservedDBInstanceNotFoundFault(response *smith } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = 
awsAwsquery_deserializeDocumentReservedDBInstanceNotFoundFault(&output, decoder) + err = awsAwsquery_deserializeDocumentOptionGroupAlreadyExistsFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23091,8 +23626,8 @@ func awsAwsquery_deserializeErrorReservedDBInstanceNotFoundFault(response *smith return output } -func awsAwsquery_deserializeErrorReservedDBInstanceQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ReservedDBInstanceQuotaExceededFault{} +func awsAwsquery_deserializeErrorOptionGroupNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.OptionGroupNotFoundFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23122,7 +23657,7 @@ func awsAwsquery_deserializeErrorReservedDBInstanceQuotaExceededFault(response * } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentReservedDBInstanceQuotaExceededFault(&output, decoder) + err = awsAwsquery_deserializeDocumentOptionGroupNotFoundFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23135,8 +23670,8 @@ func awsAwsquery_deserializeErrorReservedDBInstanceQuotaExceededFault(response * return output } -func awsAwsquery_deserializeErrorReservedDBInstancesOfferingNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ReservedDBInstancesOfferingNotFoundFault{} +func awsAwsquery_deserializeErrorOptionGroupQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.OptionGroupQuotaExceededFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23166,7 +23701,7 @@ func awsAwsquery_deserializeErrorReservedDBInstancesOfferingNotFoundFault(respon } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentReservedDBInstancesOfferingNotFoundFault(&output, decoder) + err = awsAwsquery_deserializeDocumentOptionGroupQuotaExceededFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23179,8 +23714,8 @@ func awsAwsquery_deserializeErrorReservedDBInstancesOfferingNotFoundFault(respon return output } -func awsAwsquery_deserializeErrorResourceNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.ResourceNotFoundFault{} +func awsAwsquery_deserializeErrorPointInTimeRestoreNotEnabledFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.PointInTimeRestoreNotEnabledFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23210,7 +23745,7 @@ func awsAwsquery_deserializeErrorResourceNotFoundFault(response *smithyhttp.Resp } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentResourceNotFoundFault(&output, decoder) + err = awsAwsquery_deserializeDocumentPointInTimeRestoreNotEnabledFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23223,8 +23758,8 @@ func awsAwsquery_deserializeErrorResourceNotFoundFault(response *smithyhttp.Resp return output } -func awsAwsquery_deserializeErrorSharedSnapshotQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := 
&types.SharedSnapshotQuotaExceededFault{} +func awsAwsquery_deserializeErrorProvisionedIopsNotAvailableInAZFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ProvisionedIopsNotAvailableInAZFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23254,7 +23789,7 @@ func awsAwsquery_deserializeErrorSharedSnapshotQuotaExceededFault(response *smit } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentSharedSnapshotQuotaExceededFault(&output, decoder) + err = awsAwsquery_deserializeDocumentProvisionedIopsNotAvailableInAZFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23267,8 +23802,8 @@ func awsAwsquery_deserializeErrorSharedSnapshotQuotaExceededFault(response *smit return output } -func awsAwsquery_deserializeErrorSnapshotQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.SnapshotQuotaExceededFault{} +func awsAwsquery_deserializeErrorReservedDBInstanceAlreadyExistsFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ReservedDBInstanceAlreadyExistsFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23298,7 +23833,7 @@ func awsAwsquery_deserializeErrorSnapshotQuotaExceededFault(response *smithyhttp } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentSnapshotQuotaExceededFault(&output, decoder) + err = awsAwsquery_deserializeDocumentReservedDBInstanceAlreadyExistsFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23311,8 +23846,8 @@ func awsAwsquery_deserializeErrorSnapshotQuotaExceededFault(response *smithyhttp return output } -func awsAwsquery_deserializeErrorSNSInvalidTopicFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.SNSInvalidTopicFault{} +func awsAwsquery_deserializeErrorReservedDBInstanceNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ReservedDBInstanceNotFoundFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23342,7 +23877,7 @@ func awsAwsquery_deserializeErrorSNSInvalidTopicFault(response *smithyhttp.Respo } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = awsAwsquery_deserializeDocumentSNSInvalidTopicFault(&output, decoder) + err = awsAwsquery_deserializeDocumentReservedDBInstanceNotFoundFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23355,8 +23890,8 @@ func awsAwsquery_deserializeErrorSNSInvalidTopicFault(response *smithyhttp.Respo return output } -func awsAwsquery_deserializeErrorSNSNoAuthorizationFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.SNSNoAuthorizationFault{} +func awsAwsquery_deserializeErrorReservedDBInstanceQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ReservedDBInstanceQuotaExceededFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -23386,7 +23921,7 @@ func awsAwsquery_deserializeErrorSNSNoAuthorizationFault(response *smithyhttp.Re } decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) - err = 
awsAwsquery_deserializeDocumentSNSNoAuthorizationFault(&output, decoder) + err = awsAwsquery_deserializeDocumentReservedDBInstanceQuotaExceededFault(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -23399,8 +23934,272 @@ func awsAwsquery_deserializeErrorSNSNoAuthorizationFault(response *smithyhttp.Re return output } -func awsAwsquery_deserializeErrorSNSTopicArnNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { - output := &types.SNSTopicArnNotFoundFault{} +func awsAwsquery_deserializeErrorReservedDBInstancesOfferingNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ReservedDBInstancesOfferingNotFoundFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentReservedDBInstancesOfferingNotFoundFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorResourceNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.ResourceNotFoundFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentResourceNotFoundFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorSharedSnapshotQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SharedSnapshotQuotaExceededFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := 
xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentSharedSnapshotQuotaExceededFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorSnapshotQuotaExceededFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SnapshotQuotaExceededFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentSnapshotQuotaExceededFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorSNSInvalidTopicFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SNSInvalidTopicFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentSNSInvalidTopicFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return 
&smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorSNSNoAuthorizationFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SNSNoAuthorizationFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentSNSNoAuthorizationFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + +func awsAwsquery_deserializeErrorSNSTopicArnNotFoundFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.SNSTopicArnNotFoundFault{} var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(errorBody, ringBuffer) @@ -24015,6 +24814,50 @@ func awsAwsquery_deserializeErrorTenantDatabaseQuotaExceededFault(response *smit return output } +func awsAwsquery_deserializeErrorUnsupportedDBEngineVersionFault(response *smithyhttp.Response, errorBody *bytes.Reader) error { + output := &types.UnsupportedDBEngineVersionFault{} + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(errorBody, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return output + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + t, err = decoder.GetElement("Error") + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t) + err = awsAwsquery_deserializeDocumentUnsupportedDBEngineVersionFault(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return output +} + func awsAwsquery_deserializeDocumentAccountQuota(v **types.AccountQuota, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -27180,6 +28023,12 @@ func awsAwsquery_deserializeDocumentDBCluster(v **types.DBCluster, decoder smith sv.LatestRestorableTime = ptr.Time(t) 
} + case strings.EqualFold("LimitlessDatabase", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentLimitlessDatabase(&sv.LimitlessDatabase, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("LocalWriteForwardingStatus", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -36074,6 +36923,323 @@ func awsAwsquery_deserializeDocumentDBSecurityGroupsUnwrapped(v *[]types.DBSecur *v = sv return nil } +func awsAwsquery_deserializeDocumentDBShardGroup(v **types.DBShardGroup, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DBShardGroup + if *v == nil { + sv = &types.DBShardGroup{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ComputeRedundancy", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ComputeRedundancy = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("DBClusterIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBClusterIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupResourceId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupResourceId = ptr.String(xtv) + } + + case strings.EqualFold("Endpoint", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Endpoint = ptr.String(xtv) + } + + case strings.EqualFold("MaxACU", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + f64, err := strconv.ParseFloat(xtv, 64) + if err != nil { + return err + } + sv.MaxACU = ptr.Float64(f64) + } + + case strings.EqualFold("PubliclyAccessible", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", val) + } + sv.PubliclyAccessible = ptr.Bool(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentDBShardGroupAlreadyExistsFault(v **types.DBShardGroupAlreadyExistsFault, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DBShardGroupAlreadyExistsFault + if *v == nil 
{ + sv = &types.DBShardGroupAlreadyExistsFault{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentDBShardGroupNotFoundFault(v **types.DBShardGroupNotFoundFault, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.DBShardGroupNotFoundFault + if *v == nil { + sv = &types.DBShardGroupNotFoundFault{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentDBShardGroupsList(v *[]types.DBShardGroup, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.DBShardGroup + if *v == nil { + sv = make([]types.DBShardGroup, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("DBShardGroup", t.Name.Local): + var col types.DBShardGroup + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsAwsquery_deserializeDocumentDBShardGroup(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentDBShardGroupsListUnwrapped(v *[]types.DBShardGroup, decoder smithyxml.NodeDecoder) error { + var sv []types.DBShardGroup + if *v == nil { + sv = make([]types.DBShardGroup, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.DBShardGroup + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsAwsquery_deserializeDocumentDBShardGroup(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} func awsAwsquery_deserializeDocumentDBSnapshot(v **types.DBSnapshot, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -42307,13 +43473,13 @@ func awsAwsquery_deserializeDocumentInvalidDBSecurityGroupStateFault(v **types.I return nil } -func awsAwsquery_deserializeDocumentInvalidDBSnapshotStateFault(v **types.InvalidDBSnapshotStateFault, decoder smithyxml.NodeDecoder) 
error { +func awsAwsquery_deserializeDocumentInvalidDBShardGroupStateFault(v **types.InvalidDBShardGroupStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidDBSnapshotStateFault + var sv *types.InvalidDBShardGroupStateFault if *v == nil { - sv = &types.InvalidDBSnapshotStateFault{} + sv = &types.InvalidDBShardGroupStateFault{} } else { sv = *v } @@ -42356,13 +43522,13 @@ func awsAwsquery_deserializeDocumentInvalidDBSnapshotStateFault(v **types.Invali return nil } -func awsAwsquery_deserializeDocumentInvalidDBSubnetGroupFault(v **types.InvalidDBSubnetGroupFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidDBSnapshotStateFault(v **types.InvalidDBSnapshotStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidDBSubnetGroupFault + var sv *types.InvalidDBSnapshotStateFault if *v == nil { - sv = &types.InvalidDBSubnetGroupFault{} + sv = &types.InvalidDBSnapshotStateFault{} } else { sv = *v } @@ -42405,13 +43571,13 @@ func awsAwsquery_deserializeDocumentInvalidDBSubnetGroupFault(v **types.InvalidD return nil } -func awsAwsquery_deserializeDocumentInvalidDBSubnetGroupStateFault(v **types.InvalidDBSubnetGroupStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidDBSubnetGroupFault(v **types.InvalidDBSubnetGroupFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidDBSubnetGroupStateFault + var sv *types.InvalidDBSubnetGroupFault if *v == nil { - sv = &types.InvalidDBSubnetGroupStateFault{} + sv = &types.InvalidDBSubnetGroupFault{} } else { sv = *v } @@ -42454,13 +43620,13 @@ func awsAwsquery_deserializeDocumentInvalidDBSubnetGroupStateFault(v **types.Inv return nil } -func awsAwsquery_deserializeDocumentInvalidDBSubnetStateFault(v **types.InvalidDBSubnetStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidDBSubnetGroupStateFault(v **types.InvalidDBSubnetGroupStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidDBSubnetStateFault + var sv *types.InvalidDBSubnetGroupStateFault if *v == nil { - sv = &types.InvalidDBSubnetStateFault{} + sv = &types.InvalidDBSubnetGroupStateFault{} } else { sv = *v } @@ -42503,13 +43669,13 @@ func awsAwsquery_deserializeDocumentInvalidDBSubnetStateFault(v **types.InvalidD return nil } -func awsAwsquery_deserializeDocumentInvalidEventSubscriptionStateFault(v **types.InvalidEventSubscriptionStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidDBSubnetStateFault(v **types.InvalidDBSubnetStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidEventSubscriptionStateFault + var sv *types.InvalidDBSubnetStateFault if *v == nil { - sv = &types.InvalidEventSubscriptionStateFault{} + sv = &types.InvalidDBSubnetStateFault{} } else { sv = *v } @@ -42552,13 +43718,13 @@ func awsAwsquery_deserializeDocumentInvalidEventSubscriptionStateFault(v **types return nil } -func awsAwsquery_deserializeDocumentInvalidExportOnlyFault(v **types.InvalidExportOnlyFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidEventSubscriptionStateFault(v **types.InvalidEventSubscriptionStateFault, 
decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidExportOnlyFault + var sv *types.InvalidEventSubscriptionStateFault if *v == nil { - sv = &types.InvalidExportOnlyFault{} + sv = &types.InvalidEventSubscriptionStateFault{} } else { sv = *v } @@ -42601,13 +43767,13 @@ func awsAwsquery_deserializeDocumentInvalidExportOnlyFault(v **types.InvalidExpo return nil } -func awsAwsquery_deserializeDocumentInvalidExportSourceStateFault(v **types.InvalidExportSourceStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidExportOnlyFault(v **types.InvalidExportOnlyFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidExportSourceStateFault + var sv *types.InvalidExportOnlyFault if *v == nil { - sv = &types.InvalidExportSourceStateFault{} + sv = &types.InvalidExportOnlyFault{} } else { sv = *v } @@ -42650,13 +43816,13 @@ func awsAwsquery_deserializeDocumentInvalidExportSourceStateFault(v **types.Inva return nil } -func awsAwsquery_deserializeDocumentInvalidExportTaskStateFault(v **types.InvalidExportTaskStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidExportSourceStateFault(v **types.InvalidExportSourceStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidExportTaskStateFault + var sv *types.InvalidExportSourceStateFault if *v == nil { - sv = &types.InvalidExportTaskStateFault{} + sv = &types.InvalidExportSourceStateFault{} } else { sv = *v } @@ -42699,13 +43865,13 @@ func awsAwsquery_deserializeDocumentInvalidExportTaskStateFault(v **types.Invali return nil } -func awsAwsquery_deserializeDocumentInvalidGlobalClusterStateFault(v **types.InvalidGlobalClusterStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidExportTaskStateFault(v **types.InvalidExportTaskStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidGlobalClusterStateFault + var sv *types.InvalidExportTaskStateFault if *v == nil { - sv = &types.InvalidGlobalClusterStateFault{} + sv = &types.InvalidExportTaskStateFault{} } else { sv = *v } @@ -42748,13 +43914,13 @@ func awsAwsquery_deserializeDocumentInvalidGlobalClusterStateFault(v **types.Inv return nil } -func awsAwsquery_deserializeDocumentInvalidIntegrationStateFault(v **types.InvalidIntegrationStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidGlobalClusterStateFault(v **types.InvalidGlobalClusterStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidIntegrationStateFault + var sv *types.InvalidGlobalClusterStateFault if *v == nil { - sv = &types.InvalidIntegrationStateFault{} + sv = &types.InvalidGlobalClusterStateFault{} } else { sv = *v } @@ -42797,13 +43963,13 @@ func awsAwsquery_deserializeDocumentInvalidIntegrationStateFault(v **types.Inval return nil } -func awsAwsquery_deserializeDocumentInvalidOptionGroupStateFault(v **types.InvalidOptionGroupStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidIntegrationStateFault(v **types.InvalidIntegrationStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var 
sv *types.InvalidOptionGroupStateFault + var sv *types.InvalidIntegrationStateFault if *v == nil { - sv = &types.InvalidOptionGroupStateFault{} + sv = &types.InvalidIntegrationStateFault{} } else { sv = *v } @@ -42846,13 +44012,13 @@ func awsAwsquery_deserializeDocumentInvalidOptionGroupStateFault(v **types.Inval return nil } -func awsAwsquery_deserializeDocumentInvalidResourceStateFault(v **types.InvalidResourceStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidMaxAcuFault(v **types.InvalidMaxAcuFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidResourceStateFault + var sv *types.InvalidMaxAcuFault if *v == nil { - sv = &types.InvalidResourceStateFault{} + sv = &types.InvalidMaxAcuFault{} } else { sv = *v } @@ -42895,13 +44061,13 @@ func awsAwsquery_deserializeDocumentInvalidResourceStateFault(v **types.InvalidR return nil } -func awsAwsquery_deserializeDocumentInvalidRestoreFault(v **types.InvalidRestoreFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidOptionGroupStateFault(v **types.InvalidOptionGroupStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidRestoreFault + var sv *types.InvalidOptionGroupStateFault if *v == nil { - sv = &types.InvalidRestoreFault{} + sv = &types.InvalidOptionGroupStateFault{} } else { sv = *v } @@ -42944,13 +44110,13 @@ func awsAwsquery_deserializeDocumentInvalidRestoreFault(v **types.InvalidRestore return nil } -func awsAwsquery_deserializeDocumentInvalidS3BucketFault(v **types.InvalidS3BucketFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidResourceStateFault(v **types.InvalidResourceStateFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidS3BucketFault + var sv *types.InvalidResourceStateFault if *v == nil { - sv = &types.InvalidS3BucketFault{} + sv = &types.InvalidResourceStateFault{} } else { sv = *v } @@ -42993,13 +44159,13 @@ func awsAwsquery_deserializeDocumentInvalidS3BucketFault(v **types.InvalidS3Buck return nil } -func awsAwsquery_deserializeDocumentInvalidSubnet(v **types.InvalidSubnet, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidRestoreFault(v **types.InvalidRestoreFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidSubnet + var sv *types.InvalidRestoreFault if *v == nil { - sv = &types.InvalidSubnet{} + sv = &types.InvalidRestoreFault{} } else { sv = *v } @@ -43042,13 +44208,13 @@ func awsAwsquery_deserializeDocumentInvalidSubnet(v **types.InvalidSubnet, decod return nil } -func awsAwsquery_deserializeDocumentInvalidVPCNetworkStateFault(v **types.InvalidVPCNetworkStateFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidS3BucketFault(v **types.InvalidS3BucketFault, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.InvalidVPCNetworkStateFault + var sv *types.InvalidS3BucketFault if *v == nil { - sv = &types.InvalidVPCNetworkStateFault{} + sv = &types.InvalidS3BucketFault{} } else { sv = *v } @@ -43091,185 +44257,13 @@ func awsAwsquery_deserializeDocumentInvalidVPCNetworkStateFault(v **types.Invali return nil } -func 
awsAwsquery_deserializeDocumentIPRange(v **types.IPRange, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IPRange - if *v == nil { - sv = &types.IPRange{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("CIDRIP", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.CIDRIP = ptr.String(xtv) - } - - case strings.EqualFold("Status", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Status = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentIPRangeList(v *[]types.IPRange, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.IPRange - if *v == nil { - sv = make([]types.IPRange, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("IPRange", t.Name.Local): - var col types.IPRange - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsAwsquery_deserializeDocumentIPRange(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentIPRangeListUnwrapped(v *[]types.IPRange, decoder smithyxml.NodeDecoder) error { - var sv []types.IPRange - if *v == nil { - sv = make([]types.IPRange, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.IPRange - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsAwsquery_deserializeDocumentIPRange(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsAwsquery_deserializeDocumentIssueDetails(v **types.IssueDetails, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.IssueDetails - if *v == nil { - sv = &types.IssueDetails{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("PerformanceIssueDetails", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentPerformanceIssueDetails(&sv.PerformanceIssueDetails, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentKMSKeyNotAccessibleFault(v 
**types.KMSKeyNotAccessibleFault, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeDocumentInvalidSubnet(v **types.InvalidSubnet, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *types.KMSKeyNotAccessibleFault + var sv *types.InvalidSubnet if *v == nil { - sv = &types.KMSKeyNotAccessibleFault{} + sv = &types.InvalidSubnet{} } else { sv = *v } @@ -43312,6 +44306,342 @@ func awsAwsquery_deserializeDocumentKMSKeyNotAccessibleFault(v **types.KMSKeyNot return nil } +func awsAwsquery_deserializeDocumentInvalidVPCNetworkStateFault(v **types.InvalidVPCNetworkStateFault, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.InvalidVPCNetworkStateFault + if *v == nil { + sv = &types.InvalidVPCNetworkStateFault{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIPRange(v **types.IPRange, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IPRange + if *v == nil { + sv = &types.IPRange{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("CIDRIP", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.CIDRIP = ptr.String(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIPRangeList(v *[]types.IPRange, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.IPRange + if *v == nil { + sv = make([]types.IPRange, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("IPRange", t.Name.Local): + var col types.IPRange + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsAwsquery_deserializeDocumentIPRange(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentIPRangeListUnwrapped(v *[]types.IPRange, decoder 
smithyxml.NodeDecoder) error { + var sv []types.IPRange + if *v == nil { + sv = make([]types.IPRange, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.IPRange + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsAwsquery_deserializeDocumentIPRange(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsAwsquery_deserializeDocumentIssueDetails(v **types.IssueDetails, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.IssueDetails + if *v == nil { + sv = &types.IssueDetails{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("PerformanceIssueDetails", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentPerformanceIssueDetails(&sv.PerformanceIssueDetails, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentKMSKeyNotAccessibleFault(v **types.KMSKeyNotAccessibleFault, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.KMSKeyNotAccessibleFault + if *v == nil { + sv = &types.KMSKeyNotAccessibleFault{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentLimitlessDatabase(v **types.LimitlessDatabase, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.LimitlessDatabase + if *v == nil { + sv = &types.LimitlessDatabase{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("MinRequiredACU", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + f64, err := strconv.ParseFloat(xtv, 64) + if err != nil { + return err + } + sv.MinRequiredACU = ptr.Float64(f64) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = types.LimitlessDatabaseStatus(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + 
decoder = originalDecoder + } + *v = sv + return nil +} + func awsAwsquery_deserializeDocumentLogTypeList(v *[]string, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -43467,6 +44797,55 @@ func awsAwsquery_deserializeDocumentMasterUserSecret(v **types.MasterUserSecret, return nil } +func awsAwsquery_deserializeDocumentMaxDBShardGroupLimitReached(v **types.MaxDBShardGroupLimitReached, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.MaxDBShardGroupLimitReached + if *v == nil { + sv = &types.MaxDBShardGroupLimitReached{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsAwsquery_deserializeDocumentMetric(v **types.Metric, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -52216,123 +53595,172 @@ func awsAwsquery_deserializeDocumentTenantDatabaseQuotaExceededFault(v **types.T return nil } -func awsAwsquery_deserializeDocumentTenantDatabasesList(v *[]types.TenantDatabase, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv []types.TenantDatabase - if *v == nil { - sv = make([]types.TenantDatabase, 0) - } else { - sv = *v - } - - originalDecoder := decoder - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - switch { - case strings.EqualFold("TenantDatabase", t.Name.Local): - var col types.TenantDatabase - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &col - if err := awsAwsquery_deserializeDocumentTenantDatabase(&destAddr, nodeDecoder); err != nil { - return err - } - col = *destAddr - sv = append(sv, col) - - default: - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeDocumentTenantDatabasesListUnwrapped(v *[]types.TenantDatabase, decoder smithyxml.NodeDecoder) error { - var sv []types.TenantDatabase - if *v == nil { - sv = make([]types.TenantDatabase, 0) - } else { - sv = *v - } - - switch { - default: - var mv types.TenantDatabase - t := decoder.StartEl - _ = t - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - destAddr := &mv - if err := awsAwsquery_deserializeDocumentTenantDatabase(&destAddr, nodeDecoder); err != nil { - return err - } - mv = *destAddr - sv = append(sv, mv) - } - *v = sv - return nil -} -func awsAwsquery_deserializeDocumentTimezone(v **types.Timezone, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *types.Timezone - if *v == nil { - sv = &types.Timezone{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = 
smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("TimezoneName", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.TimezoneName = ptr.String(xtv) - } - - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - +func awsAwsquery_deserializeDocumentTenantDatabasesList(v *[]types.TenantDatabase, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv []types.TenantDatabase + if *v == nil { + sv = make([]types.TenantDatabase, 0) + } else { + sv = *v + } + + originalDecoder := decoder + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + switch { + case strings.EqualFold("TenantDatabase", t.Name.Local): + var col types.TenantDatabase + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &col + if err := awsAwsquery_deserializeDocumentTenantDatabase(&destAddr, nodeDecoder); err != nil { + return err + } + col = *destAddr + sv = append(sv, col) + + default: + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentTenantDatabasesListUnwrapped(v *[]types.TenantDatabase, decoder smithyxml.NodeDecoder) error { + var sv []types.TenantDatabase + if *v == nil { + sv = make([]types.TenantDatabase, 0) + } else { + sv = *v + } + + switch { + default: + var mv types.TenantDatabase + t := decoder.StartEl + _ = t + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + destAddr := &mv + if err := awsAwsquery_deserializeDocumentTenantDatabase(&destAddr, nodeDecoder); err != nil { + return err + } + mv = *destAddr + sv = append(sv, mv) + } + *v = sv + return nil +} +func awsAwsquery_deserializeDocumentTimezone(v **types.Timezone, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.Timezone + if *v == nil { + sv = &types.Timezone{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("TimezoneName", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.TimezoneName = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeDocumentUnsupportedDBEngineVersionFault(v **types.UnsupportedDBEngineVersionFault, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.UnsupportedDBEngineVersionFault + if *v == nil { + sv = &types.UnsupportedDBEngineVersionFault{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("message", t.Name.Local): + val, err := decoder.Value() + if err != nil 
{ + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Message = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsAwsquery_deserializeDocumentUpgradeTarget(v **types.UpgradeTarget, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -54840,6 +56268,157 @@ func awsAwsquery_deserializeOpDocumentCreateDBSecurityGroupOutput(v **CreateDBSe return nil } +func awsAwsquery_deserializeOpDocumentCreateDBShardGroupOutput(v **CreateDBShardGroupOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateDBShardGroupOutput + if *v == nil { + sv = &CreateDBShardGroupOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ComputeRedundancy", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ComputeRedundancy = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("DBClusterIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBClusterIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupResourceId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupResourceId = ptr.String(xtv) + } + + case strings.EqualFold("Endpoint", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Endpoint = ptr.String(xtv) + } + + case strings.EqualFold("MaxACU", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + f64, err := strconv.ParseFloat(xtv, 64) + if err != nil { + return err + } + sv.MaxACU = ptr.Float64(f64) + } + + case strings.EqualFold("PubliclyAccessible", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", val) + } + sv.PubliclyAccessible = ptr.Bool(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsAwsquery_deserializeOpDocumentCreateDBSnapshotOutput(v **CreateDBSnapshotOutput, decoder smithyxml.NodeDecoder) error { if v == nil { 
return fmt.Errorf("unexpected nil of type %T", v) @@ -56145,6 +57724,157 @@ func awsAwsquery_deserializeOpDocumentDeleteDBProxyOutput(v **DeleteDBProxyOutpu return nil } +func awsAwsquery_deserializeOpDocumentDeleteDBShardGroupOutput(v **DeleteDBShardGroupOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DeleteDBShardGroupOutput + if *v == nil { + sv = &DeleteDBShardGroupOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ComputeRedundancy", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ComputeRedundancy = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("DBClusterIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBClusterIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupResourceId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupResourceId = ptr.String(xtv) + } + + case strings.EqualFold("Endpoint", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Endpoint = ptr.String(xtv) + } + + case strings.EqualFold("MaxACU", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + f64, err := strconv.ParseFloat(xtv, 64) + if err != nil { + return err + } + sv.MaxACU = ptr.Float64(f64) + } + + case strings.EqualFold("PubliclyAccessible", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", val) + } + sv.PubliclyAccessible = ptr.Bool(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsAwsquery_deserializeOpDocumentDeleteDBSnapshotOutput(v **DeleteDBSnapshotOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -57035,13 +58765,233 @@ func awsAwsquery_deserializeOpDocumentDescribeDBClusterSnapshotsOutput(v **Descr return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBClustersOutput(v **DescribeDBClustersOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBClustersOutput(v **DescribeDBClustersOutput, decoder smithyxml.NodeDecoder) error 
{ + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeDBClustersOutput + if *v == nil { + sv = &DescribeDBClustersOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DBClusters", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentDBClusterList(&sv.DBClusters, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Marker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Marker = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentDescribeDBEngineVersionsOutput(v **DescribeDBEngineVersionsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeDBEngineVersionsOutput + if *v == nil { + sv = &DescribeDBEngineVersionsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DBEngineVersions", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentDBEngineVersionList(&sv.DBEngineVersions, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Marker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Marker = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentDescribeDBInstanceAutomatedBackupsOutput(v **DescribeDBInstanceAutomatedBackupsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeDBInstanceAutomatedBackupsOutput + if *v == nil { + sv = &DescribeDBInstanceAutomatedBackupsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DBInstanceAutomatedBackups", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentDBInstanceAutomatedBackupList(&sv.DBInstanceAutomatedBackups, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Marker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Marker = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func 
awsAwsquery_deserializeOpDocumentDescribeDBInstancesOutput(v **DescribeDBInstancesOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *DescribeDBInstancesOutput + if *v == nil { + sv = &DescribeDBInstancesOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("DBInstances", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentDBInstanceList(&sv.DBInstances, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Marker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Marker = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsAwsquery_deserializeOpDocumentDescribeDBLogFilesOutput(v **DescribeDBLogFilesOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBClustersOutput + var sv *DescribeDBLogFilesOutput if *v == nil { - sv = &DescribeDBClustersOutput{} + sv = &DescribeDBLogFilesOutput{} } else { sv = *v } @@ -57057,9 +59007,9 @@ func awsAwsquery_deserializeOpDocumentDescribeDBClustersOutput(v **DescribeDBClu originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("DBClusters", t.Name.Local): + case strings.EqualFold("DescribeDBLogFiles", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBClusterList(&sv.DBClusters, nodeDecoder); err != nil { + if err := awsAwsquery_deserializeDocumentDescribeDBLogFilesList(&sv.DescribeDBLogFiles, nodeDecoder); err != nil { return err } @@ -57090,13 +59040,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBClustersOutput(v **DescribeDBClu return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBEngineVersionsOutput(v **DescribeDBEngineVersionsOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBParameterGroupsOutput(v **DescribeDBParameterGroupsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBEngineVersionsOutput + var sv *DescribeDBParameterGroupsOutput if *v == nil { - sv = &DescribeDBEngineVersionsOutput{} + sv = &DescribeDBParameterGroupsOutput{} } else { sv = *v } @@ -57112,9 +59062,9 @@ func awsAwsquery_deserializeOpDocumentDescribeDBEngineVersionsOutput(v **Describ originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("DBEngineVersions", t.Name.Local): + case strings.EqualFold("DBParameterGroups", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBEngineVersionList(&sv.DBEngineVersions, nodeDecoder); err != nil { + if err := awsAwsquery_deserializeDocumentDBParameterGroupList(&sv.DBParameterGroups, nodeDecoder); err != nil { return err } @@ -57145,13 +59095,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBEngineVersionsOutput(v 
**Describ return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBInstanceAutomatedBackupsOutput(v **DescribeDBInstanceAutomatedBackupsOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBParametersOutput(v **DescribeDBParametersOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBInstanceAutomatedBackupsOutput + var sv *DescribeDBParametersOutput if *v == nil { - sv = &DescribeDBInstanceAutomatedBackupsOutput{} + sv = &DescribeDBParametersOutput{} } else { sv = *v } @@ -57167,12 +59117,6 @@ func awsAwsquery_deserializeOpDocumentDescribeDBInstanceAutomatedBackupsOutput(v originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("DBInstanceAutomatedBackups", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBInstanceAutomatedBackupList(&sv.DBInstanceAutomatedBackups, nodeDecoder); err != nil { - return err - } - case strings.EqualFold("Marker", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -57186,61 +59130,12 @@ func awsAwsquery_deserializeOpDocumentDescribeDBInstanceAutomatedBackupsOutput(v sv.Marker = ptr.String(xtv) } - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentDescribeDBInstancesOutput(v **DescribeDBInstancesOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *DescribeDBInstancesOutput - if *v == nil { - sv = &DescribeDBInstancesOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DBInstances", t.Name.Local): + case strings.EqualFold("Parameters", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBInstanceList(&sv.DBInstances, nodeDecoder); err != nil { + if err := awsAwsquery_deserializeDocumentParametersList(&sv.Parameters, nodeDecoder); err != nil { return err } - case strings.EqualFold("Marker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Marker = ptr.String(xtv) - } - default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -57255,13 +59150,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBInstancesOutput(v **DescribeDBIn return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBLogFilesOutput(v **DescribeDBLogFilesOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBProxiesOutput(v **DescribeDBProxiesOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBLogFilesOutput + var sv *DescribeDBProxiesOutput if *v == nil { - sv = &DescribeDBLogFilesOutput{} + sv = &DescribeDBProxiesOutput{} } else { sv = *v } @@ -57277,9 +59172,9 @@ func awsAwsquery_deserializeOpDocumentDescribeDBLogFilesOutput(v **DescribeDBLog originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch 
{ - case strings.EqualFold("DescribeDBLogFiles", t.Name.Local): + case strings.EqualFold("DBProxies", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDescribeDBLogFilesList(&sv.DescribeDBLogFiles, nodeDecoder); err != nil { + if err := awsAwsquery_deserializeDocumentDBProxyList(&sv.DBProxies, nodeDecoder); err != nil { return err } @@ -57310,13 +59205,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBLogFilesOutput(v **DescribeDBLog return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBParameterGroupsOutput(v **DescribeDBParameterGroupsOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBProxyEndpointsOutput(v **DescribeDBProxyEndpointsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBParameterGroupsOutput + var sv *DescribeDBProxyEndpointsOutput if *v == nil { - sv = &DescribeDBParameterGroupsOutput{} + sv = &DescribeDBProxyEndpointsOutput{} } else { sv = *v } @@ -57332,9 +59227,9 @@ func awsAwsquery_deserializeOpDocumentDescribeDBParameterGroupsOutput(v **Descri originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("DBParameterGroups", t.Name.Local): + case strings.EqualFold("DBProxyEndpoints", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBParameterGroupList(&sv.DBParameterGroups, nodeDecoder); err != nil { + if err := awsAwsquery_deserializeDocumentDBProxyEndpointList(&sv.DBProxyEndpoints, nodeDecoder); err != nil { return err } @@ -57365,13 +59260,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBParameterGroupsOutput(v **Descri return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBParametersOutput(v **DescribeDBParametersOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBProxyTargetGroupsOutput(v **DescribeDBProxyTargetGroupsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBParametersOutput + var sv *DescribeDBProxyTargetGroupsOutput if *v == nil { - sv = &DescribeDBParametersOutput{} + sv = &DescribeDBProxyTargetGroupsOutput{} } else { sv = *v } @@ -57400,9 +59295,9 @@ func awsAwsquery_deserializeOpDocumentDescribeDBParametersOutput(v **DescribeDBP sv.Marker = ptr.String(xtv) } - case strings.EqualFold("Parameters", t.Name.Local): + case strings.EqualFold("TargetGroups", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentParametersList(&sv.Parameters, nodeDecoder); err != nil { + if err := awsAwsquery_deserializeDocumentTargetGroupList(&sv.TargetGroups, nodeDecoder); err != nil { return err } @@ -57420,13 +59315,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBParametersOutput(v **DescribeDBP return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBProxiesOutput(v **DescribeDBProxiesOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBProxyTargetsOutput(v **DescribeDBProxyTargetsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBProxiesOutput + var sv *DescribeDBProxyTargetsOutput if *v == nil { - sv = &DescribeDBProxiesOutput{} + sv = &DescribeDBProxyTargetsOutput{} } else { sv = *v } @@ -57442,12 
+59337,6 @@ func awsAwsquery_deserializeOpDocumentDescribeDBProxiesOutput(v **DescribeDBProx originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("DBProxies", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBProxyList(&sv.DBProxies, nodeDecoder); err != nil { - return err - } - case strings.EqualFold("Marker", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -57461,60 +59350,11 @@ func awsAwsquery_deserializeOpDocumentDescribeDBProxiesOutput(v **DescribeDBProx sv.Marker = ptr.String(xtv) } - default: - // Do nothing and ignore the unexpected tag element - err = decoder.Decoder.Skip() - if err != nil { - return err - } - - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentDescribeDBProxyEndpointsOutput(v **DescribeDBProxyEndpointsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *DescribeDBProxyEndpointsOutput - if *v == nil { - sv = &DescribeDBProxyEndpointsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { - case strings.EqualFold("DBProxyEndpoints", t.Name.Local): + case strings.EqualFold("Targets", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBProxyEndpointList(&sv.DBProxyEndpoints, nodeDecoder); err != nil { - return err - } - - case strings.EqualFold("Marker", t.Name.Local): - val, err := decoder.Value() - if err != nil { + if err := awsAwsquery_deserializeDocumentTargetList(&sv.Targets, nodeDecoder); err != nil { return err } - if val == nil { - break - } - { - xtv := string(val) - sv.Marker = ptr.String(xtv) - } default: // Do nothing and ignore the unexpected tag element @@ -57530,13 +59370,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBProxyEndpointsOutput(v **Describ return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBProxyTargetGroupsOutput(v **DescribeDBProxyTargetGroupsOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBRecommendationsOutput(v **DescribeDBRecommendationsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBProxyTargetGroupsOutput + var sv *DescribeDBRecommendationsOutput if *v == nil { - sv = &DescribeDBProxyTargetGroupsOutput{} + sv = &DescribeDBRecommendationsOutput{} } else { sv = *v } @@ -57552,61 +59392,12 @@ func awsAwsquery_deserializeOpDocumentDescribeDBProxyTargetGroupsOutput(v **Desc originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("Marker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Marker = ptr.String(xtv) - } - - case strings.EqualFold("TargetGroups", t.Name.Local): + case strings.EqualFold("DBRecommendations", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentTargetGroupList(&sv.TargetGroups, nodeDecoder); err != nil { - return err - } - - default: - // Do nothing and ignore the unexpected tag element - err = 
decoder.Decoder.Skip() - if err != nil { + if err := awsAwsquery_deserializeDocumentDBRecommendationList(&sv.DBRecommendations, nodeDecoder); err != nil { return err } - } - decoder = originalDecoder - } - *v = sv - return nil -} - -func awsAwsquery_deserializeOpDocumentDescribeDBProxyTargetsOutput(v **DescribeDBProxyTargetsOutput, decoder smithyxml.NodeDecoder) error { - if v == nil { - return fmt.Errorf("unexpected nil of type %T", v) - } - var sv *DescribeDBProxyTargetsOutput - if *v == nil { - sv = &DescribeDBProxyTargetsOutput{} - } else { - sv = *v - } - - for { - t, done, err := decoder.Token() - if err != nil { - return err - } - if done { - break - } - originalDecoder := decoder - decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) - switch { case strings.EqualFold("Marker", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -57620,12 +59411,6 @@ func awsAwsquery_deserializeOpDocumentDescribeDBProxyTargetsOutput(v **DescribeD sv.Marker = ptr.String(xtv) } - case strings.EqualFold("Targets", t.Name.Local): - nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentTargetList(&sv.Targets, nodeDecoder); err != nil { - return err - } - default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -57640,13 +59425,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBProxyTargetsOutput(v **DescribeD return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBRecommendationsOutput(v **DescribeDBRecommendationsOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBSecurityGroupsOutput(v **DescribeDBSecurityGroupsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBRecommendationsOutput + var sv *DescribeDBSecurityGroupsOutput if *v == nil { - sv = &DescribeDBRecommendationsOutput{} + sv = &DescribeDBSecurityGroupsOutput{} } else { sv = *v } @@ -57662,9 +59447,9 @@ func awsAwsquery_deserializeOpDocumentDescribeDBRecommendationsOutput(v **Descri originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("DBRecommendations", t.Name.Local): + case strings.EqualFold("DBSecurityGroups", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBRecommendationList(&sv.DBRecommendations, nodeDecoder); err != nil { + if err := awsAwsquery_deserializeDocumentDBSecurityGroups(&sv.DBSecurityGroups, nodeDecoder); err != nil { return err } @@ -57695,13 +59480,13 @@ func awsAwsquery_deserializeOpDocumentDescribeDBRecommendationsOutput(v **Descri return nil } -func awsAwsquery_deserializeOpDocumentDescribeDBSecurityGroupsOutput(v **DescribeDBSecurityGroupsOutput, decoder smithyxml.NodeDecoder) error { +func awsAwsquery_deserializeOpDocumentDescribeDBShardGroupsOutput(v **DescribeDBShardGroupsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *DescribeDBSecurityGroupsOutput + var sv *DescribeDBShardGroupsOutput if *v == nil { - sv = &DescribeDBSecurityGroupsOutput{} + sv = &DescribeDBShardGroupsOutput{} } else { sv = *v } @@ -57717,9 +59502,9 @@ func awsAwsquery_deserializeOpDocumentDescribeDBSecurityGroupsOutput(v **Describ originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("DBSecurityGroups", t.Name.Local): + case 
strings.EqualFold("DBShardGroups", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsAwsquery_deserializeDocumentDBSecurityGroups(&sv.DBSecurityGroups, nodeDecoder); err != nil { + if err := awsAwsquery_deserializeDocumentDBShardGroupsList(&sv.DBShardGroups, nodeDecoder); err != nil { return err } @@ -60406,6 +62191,157 @@ func awsAwsquery_deserializeOpDocumentModifyDBRecommendationOutput(v **ModifyDBR return nil } +func awsAwsquery_deserializeOpDocumentModifyDBShardGroupOutput(v **ModifyDBShardGroupOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ModifyDBShardGroupOutput + if *v == nil { + sv = &ModifyDBShardGroupOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ComputeRedundancy", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ComputeRedundancy = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("DBClusterIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBClusterIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupResourceId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupResourceId = ptr.String(xtv) + } + + case strings.EqualFold("Endpoint", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Endpoint = ptr.String(xtv) + } + + case strings.EqualFold("MaxACU", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + f64, err := strconv.ParseFloat(xtv, 64) + if err != nil { + return err + } + sv.MaxACU = ptr.Float64(f64) + } + + case strings.EqualFold("PubliclyAccessible", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", val) + } + sv.PubliclyAccessible = ptr.Bool(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsAwsquery_deserializeOpDocumentModifyDBSnapshotAttributeOutput(v **ModifyDBSnapshotAttributeOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -60910,6 +62846,157 @@ func 
awsAwsquery_deserializeOpDocumentRebootDBInstanceOutput(v **RebootDBInstanc return nil } +func awsAwsquery_deserializeOpDocumentRebootDBShardGroupOutput(v **RebootDBShardGroupOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *RebootDBShardGroupOutput + if *v == nil { + sv = &RebootDBShardGroupOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("ComputeRedundancy", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + i64, err := strconv.ParseInt(xtv, 10, 64) + if err != nil { + return err + } + sv.ComputeRedundancy = ptr.Int32(int32(i64)) + } + + case strings.EqualFold("DBClusterIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBClusterIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupIdentifier", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupIdentifier = ptr.String(xtv) + } + + case strings.EqualFold("DBShardGroupResourceId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.DBShardGroupResourceId = ptr.String(xtv) + } + + case strings.EqualFold("Endpoint", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Endpoint = ptr.String(xtv) + } + + case strings.EqualFold("MaxACU", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + f64, err := strconv.ParseFloat(xtv, 64) + if err != nil { + return err + } + sv.MaxACU = ptr.Float64(f64) + } + + case strings.EqualFold("PubliclyAccessible", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected BooleanOptional to be of type *bool, got %T instead", val) + } + sv.PubliclyAccessible = ptr.Bool(xtv) + } + + case strings.EqualFold("Status", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Status = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsAwsquery_deserializeOpDocumentRegisterDBProxyTargetsOutput(v **RegisterDBProxyTargetsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/service/rds/generated.json b/service/rds/generated.json index 60151831b39..293b705f498 100644 --- a/service/rds/generated.json +++ b/service/rds/generated.json @@ -41,6 +41,7 @@ "api_op_CreateDBProxy.go", "api_op_CreateDBProxyEndpoint.go", "api_op_CreateDBSecurityGroup.go", + "api_op_CreateDBShardGroup.go", "api_op_CreateDBSnapshot.go", "api_op_CreateDBSubnetGroup.go", "api_op_CreateEventSubscription.go", @@ -61,6 +62,7 @@ 
"api_op_DeleteDBProxy.go", "api_op_DeleteDBProxyEndpoint.go", "api_op_DeleteDBSecurityGroup.go", + "api_op_DeleteDBShardGroup.go", "api_op_DeleteDBSnapshot.go", "api_op_DeleteDBSubnetGroup.go", "api_op_DeleteEventSubscription.go", @@ -92,6 +94,7 @@ "api_op_DescribeDBProxyTargets.go", "api_op_DescribeDBRecommendations.go", "api_op_DescribeDBSecurityGroups.go", + "api_op_DescribeDBShardGroups.go", "api_op_DescribeDBSnapshotAttributes.go", "api_op_DescribeDBSnapshotTenantDatabases.go", "api_op_DescribeDBSnapshots.go", @@ -133,6 +136,7 @@ "api_op_ModifyDBProxyEndpoint.go", "api_op_ModifyDBProxyTargetGroup.go", "api_op_ModifyDBRecommendation.go", + "api_op_ModifyDBShardGroup.go", "api_op_ModifyDBSnapshot.go", "api_op_ModifyDBSnapshotAttribute.go", "api_op_ModifyDBSubnetGroup.go", @@ -145,6 +149,7 @@ "api_op_PurchaseReservedDBInstancesOffering.go", "api_op_RebootDBCluster.go", "api_op_RebootDBInstance.go", + "api_op_RebootDBShardGroup.go", "api_op_RegisterDBProxyTargets.go", "api_op_RemoveFromGlobalCluster.go", "api_op_RemoveRoleFromDBCluster.go", diff --git a/service/rds/serializers.go b/service/rds/serializers.go index b1935acc445..efa5113cdb1 100644 --- a/service/rds/serializers.go +++ b/service/rds/serializers.go @@ -1618,6 +1618,70 @@ func (m *awsAwsquery_serializeOpCreateDBSecurityGroup) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } +type awsAwsquery_serializeOpCreateDBShardGroup struct { +} + +func (*awsAwsquery_serializeOpCreateDBShardGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpCreateDBShardGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateDBShardGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("CreateDBShardGroup") + body.Key("Version").String("2014-10-31") + + if err := awsAwsquery_serializeOpDocumentCreateDBShardGroupInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, 
metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsquery_serializeOpCreateDBSnapshot struct { } @@ -2898,6 +2962,70 @@ func (m *awsAwsquery_serializeOpDeleteDBSecurityGroup) HandleSerialize(ctx conte return next.HandleSerialize(ctx, in) } +type awsAwsquery_serializeOpDeleteDBShardGroup struct { +} + +func (*awsAwsquery_serializeOpDeleteDBShardGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpDeleteDBShardGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteDBShardGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DeleteDBShardGroup") + body.Key("Version").String("2014-10-31") + + if err := awsAwsquery_serializeOpDocumentDeleteDBShardGroupInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsquery_serializeOpDeleteDBSnapshot struct { } @@ -4878,6 +5006,70 @@ func (m *awsAwsquery_serializeOpDescribeDBSecurityGroups) HandleSerialize(ctx co return next.HandleSerialize(ctx, in) } +type awsAwsquery_serializeOpDescribeDBShardGroups struct { +} + +func (*awsAwsquery_serializeOpDescribeDBShardGroups) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpDescribeDBShardGroups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DescribeDBShardGroupsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown 
input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("DescribeDBShardGroups") + body.Key("Version").String("2014-10-31") + + if err := awsAwsquery_serializeOpDocumentDescribeDBShardGroupsInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsquery_serializeOpDescribeDBSnapshotAttributes struct { } @@ -7502,6 +7694,70 @@ func (m *awsAwsquery_serializeOpModifyDBRecommendation) HandleSerialize(ctx cont return next.HandleSerialize(ctx, in) } +type awsAwsquery_serializeOpModifyDBShardGroup struct { +} + +func (*awsAwsquery_serializeOpModifyDBShardGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpModifyDBShardGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ModifyDBShardGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("ModifyDBShardGroup") + body.Key("Version").String("2014-10-31") + + if err := awsAwsquery_serializeOpDocumentModifyDBShardGroupInput(input, bodyEncoder.Value); err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsquery_serializeOpModifyDBSnapshot struct { } @@ -8270,6 +8526,70 @@ func (m *awsAwsquery_serializeOpRebootDBInstance) HandleSerialize(ctx context.Co return next.HandleSerialize(ctx, in) } +type awsAwsquery_serializeOpRebootDBShardGroup struct { +} + +func (*awsAwsquery_serializeOpRebootDBShardGroup) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsquery_serializeOpRebootDBShardGroup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*RebootDBShardGroupInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded") + + bodyWriter := bytes.NewBuffer(nil) + bodyEncoder := query.NewEncoder(bodyWriter) + body := bodyEncoder.Object() + body.Key("Action").String("RebootDBShardGroup") + body.Key("Version").String("2014-10-31") + + if err := awsAwsquery_serializeOpDocumentRebootDBShardGroupInput(input, bodyEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + err = bodyEncoder.Encode() + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsquery_serializeOpRegisterDBProxyTargets struct { } @@ -11297,6 +11617,11 @@ func awsAwsquery_serializeOpDocumentCreateDBClusterInput(v *CreateDBClusterInput objectKey.Boolean(*v.EnableIAMDatabaseAuthentication) } + if v.EnableLimitlessDatabase != nil { + objectKey := object.Key("EnableLimitlessDatabase") + objectKey.Boolean(*v.EnableLimitlessDatabase) + } + if v.EnableLocalWriteForwarding != nil { objectKey := object.Key("EnableLocalWriteForwarding") objectKey.Boolean(*v.EnableLocalWriteForwarding) @@ -12250,6 
+12575,51 @@ func awsAwsquery_serializeOpDocumentCreateDBSecurityGroupInput(v *CreateDBSecuri return nil } +func awsAwsquery_serializeOpDocumentCreateDBShardGroupInput(v *CreateDBShardGroupInput, value query.Value) error { + object := value.Object() + _ = object + + if v.ComputeRedundancy != nil { + objectKey := object.Key("ComputeRedundancy") + objectKey.Integer(*v.ComputeRedundancy) + } + + if v.DBClusterIdentifier != nil { + objectKey := object.Key("DBClusterIdentifier") + objectKey.String(*v.DBClusterIdentifier) + } + + if v.DBShardGroupIdentifier != nil { + objectKey := object.Key("DBShardGroupIdentifier") + objectKey.String(*v.DBShardGroupIdentifier) + } + + if v.MaxACU != nil { + objectKey := object.Key("MaxACU") + switch { + case math.IsNaN(*v.MaxACU): + objectKey.String("NaN") + + case math.IsInf(*v.MaxACU, 1): + objectKey.String("Infinity") + + case math.IsInf(*v.MaxACU, -1): + objectKey.String("-Infinity") + + default: + objectKey.Double(*v.MaxACU) + + } + } + + if v.PubliclyAccessible != nil { + objectKey := object.Key("PubliclyAccessible") + objectKey.Boolean(*v.PubliclyAccessible) + } + + return nil +} + func awsAwsquery_serializeOpDocumentCreateDBSnapshotInput(v *CreateDBSnapshotInput, value query.Value) error { object := value.Object() _ = object @@ -12715,6 +13085,18 @@ func awsAwsquery_serializeOpDocumentDeleteDBSecurityGroupInput(v *DeleteDBSecuri return nil } +func awsAwsquery_serializeOpDocumentDeleteDBShardGroupInput(v *DeleteDBShardGroupInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DBShardGroupIdentifier != nil { + objectKey := object.Key("DBShardGroupIdentifier") + objectKey.String(*v.DBShardGroupIdentifier) + } + + return nil +} + func awsAwsquery_serializeOpDocumentDeleteDBSnapshotInput(v *DeleteDBSnapshotInput, value query.Value) error { object := value.Object() _ = object @@ -13608,6 +13990,35 @@ func awsAwsquery_serializeOpDocumentDescribeDBSecurityGroupsInput(v *DescribeDBS return nil } +func awsAwsquery_serializeOpDocumentDescribeDBShardGroupsInput(v *DescribeDBShardGroupsInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DBShardGroupIdentifier != nil { + objectKey := object.Key("DBShardGroupIdentifier") + objectKey.String(*v.DBShardGroupIdentifier) + } + + if v.Filters != nil { + objectKey := object.Key("Filters") + if err := awsAwsquery_serializeDocumentFilterList(v.Filters, objectKey); err != nil { + return err + } + } + + if v.Marker != nil { + objectKey := object.Key("Marker") + objectKey.String(*v.Marker) + } + + if v.MaxRecords != nil { + objectKey := object.Key("MaxRecords") + objectKey.Integer(*v.MaxRecords) + } + + return nil +} + func awsAwsquery_serializeOpDocumentDescribeDBSnapshotAttributesInput(v *DescribeDBSnapshotAttributesInput, value query.Value) error { object := value.Object() _ = object @@ -14694,6 +15105,11 @@ func awsAwsquery_serializeOpDocumentModifyDBClusterInput(v *ModifyDBClusterInput objectKey.Boolean(*v.EnableIAMDatabaseAuthentication) } + if v.EnableLimitlessDatabase != nil { + objectKey := object.Key("EnableLimitlessDatabase") + objectKey.Boolean(*v.EnableLimitlessDatabase) + } + if v.EnableLocalWriteForwarding != nil { objectKey := object.Key("EnableLocalWriteForwarding") objectKey.Boolean(*v.EnableLocalWriteForwarding) @@ -15337,6 +15753,36 @@ func awsAwsquery_serializeOpDocumentModifyDBRecommendationInput(v *ModifyDBRecom return nil } +func awsAwsquery_serializeOpDocumentModifyDBShardGroupInput(v *ModifyDBShardGroupInput, value query.Value) error { + object 
:= value.Object() + _ = object + + if v.DBShardGroupIdentifier != nil { + objectKey := object.Key("DBShardGroupIdentifier") + objectKey.String(*v.DBShardGroupIdentifier) + } + + if v.MaxACU != nil { + objectKey := object.Key("MaxACU") + switch { + case math.IsNaN(*v.MaxACU): + objectKey.String("NaN") + + case math.IsInf(*v.MaxACU, 1): + objectKey.String("Infinity") + + case math.IsInf(*v.MaxACU, -1): + objectKey.String("-Infinity") + + default: + objectKey.Double(*v.MaxACU) + + } + } + + return nil +} + func awsAwsquery_serializeOpDocumentModifyDBSnapshotAttributeInput(v *ModifyDBSnapshotAttributeInput, value query.Value) error { object := value.Object() _ = object @@ -15630,6 +16076,18 @@ func awsAwsquery_serializeOpDocumentRebootDBInstanceInput(v *RebootDBInstanceInp return nil } +func awsAwsquery_serializeOpDocumentRebootDBShardGroupInput(v *RebootDBShardGroupInput, value query.Value) error { + object := value.Object() + _ = object + + if v.DBShardGroupIdentifier != nil { + objectKey := object.Key("DBShardGroupIdentifier") + objectKey.String(*v.DBShardGroupIdentifier) + } + + return nil +} + func awsAwsquery_serializeOpDocumentRegisterDBProxyTargetsInput(v *RegisterDBProxyTargetsInput, value query.Value) error { object := value.Object() _ = object diff --git a/service/rds/types/enums.go b/service/rds/types/enums.go index a626e41ef83..36c18cdf6eb 100644 --- a/service/rds/types/enums.go +++ b/service/rds/types/enums.go @@ -377,6 +377,36 @@ func (IntegrationStatus) Values() []IntegrationStatus { } } +type LimitlessDatabaseStatus string + +// Enum values for LimitlessDatabaseStatus +const ( + LimitlessDatabaseStatusActive LimitlessDatabaseStatus = "active" + LimitlessDatabaseStatusNotInUse LimitlessDatabaseStatus = "not-in-use" + LimitlessDatabaseStatusEnabled LimitlessDatabaseStatus = "enabled" + LimitlessDatabaseStatusDisabled LimitlessDatabaseStatus = "disabled" + LimitlessDatabaseStatusEnabling LimitlessDatabaseStatus = "enabling" + LimitlessDatabaseStatusDisabling LimitlessDatabaseStatus = "disabling" + LimitlessDatabaseStatusModifyingMaxCapacity LimitlessDatabaseStatus = "modifying-max-capacity" + LimitlessDatabaseStatusError LimitlessDatabaseStatus = "error" +) + +// Values returns all known values for LimitlessDatabaseStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (LimitlessDatabaseStatus) Values() []LimitlessDatabaseStatus { + return []LimitlessDatabaseStatus{ + "active", + "not-in-use", + "enabled", + "disabled", + "enabling", + "disabling", + "modifying-max-capacity", + "error", + } +} + type LocalWriteForwardingStatus string // Enum values for LocalWriteForwardingStatus diff --git a/service/rds/types/errors.go b/service/rds/types/errors.go index cf585d227a7..723a2464e3a 100644 --- a/service/rds/types/errors.go +++ b/service/rds/types/errors.go @@ -1393,6 +1393,59 @@ func (e *DBSecurityGroupQuotaExceededFault) ErrorCode() string { } func (e *DBSecurityGroupQuotaExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// The specified DB shard group name must be unique in your Amazon Web Services +// account in the specified Amazon Web Services Region. 
+type DBShardGroupAlreadyExistsFault struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *DBShardGroupAlreadyExistsFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DBShardGroupAlreadyExistsFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DBShardGroupAlreadyExistsFault) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "DBShardGroupAlreadyExists" + } + return *e.ErrorCodeOverride +} +func (e *DBShardGroupAlreadyExistsFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified DB shard group name wasn't found. +type DBShardGroupNotFoundFault struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *DBShardGroupNotFoundFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *DBShardGroupNotFoundFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *DBShardGroupNotFoundFault) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "DBShardGroupNotFound" + } + return *e.ErrorCodeOverride +} +func (e *DBShardGroupNotFoundFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // DBSnapshotIdentifier is already used by an existing snapshot. type DBSnapshotAlreadyExistsFault struct { Message *string @@ -2537,6 +2590,32 @@ func (e *InvalidDBSecurityGroupStateFault) ErrorCode() string { } func (e *InvalidDBSecurityGroupStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// The DB shard group must be in the available state. +type InvalidDBShardGroupStateFault struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidDBShardGroupStateFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidDBShardGroupStateFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidDBShardGroupStateFault) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidDBShardGroupState" + } + return *e.ErrorCodeOverride +} +func (e *InvalidDBShardGroupStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The state of the DB snapshot doesn't allow deletion. type InvalidDBSnapshotStateFault struct { Message *string @@ -2804,6 +2883,33 @@ func (e *InvalidIntegrationStateFault) ErrorCode() string { } func (e *InvalidIntegrationStateFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// The maximum capacity of the DB shard group must be 48-7168 Aurora capacity +// units (ACUs). +type InvalidMaxAcuFault struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *InvalidMaxAcuFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *InvalidMaxAcuFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *InvalidMaxAcuFault) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "InvalidMaxAcu" + } + return *e.ErrorCodeOverride +} +func (e *InvalidMaxAcuFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The option group isn't in the available state. 
type InvalidOptionGroupStateFault struct { Message *string @@ -2990,6 +3096,33 @@ func (e *KMSKeyNotAccessibleFault) ErrorCode() string { } func (e *KMSKeyNotAccessibleFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } +// The maximum number of DB shard groups for your Amazon Web Services account in +// the specified Amazon Web Services Region has been reached. +type MaxDBShardGroupLimitReached struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *MaxDBShardGroupLimitReached) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *MaxDBShardGroupLimitReached) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *MaxDBShardGroupLimitReached) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "MaxDBShardGroupLimitReached" + } + return *e.ErrorCodeOverride +} +func (e *MaxDBShardGroupLimitReached) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // The network type is invalid for the DB instance. Valid network type values are // IPV4 and DUAL . type NetworkTypeNotSupported struct { @@ -3759,3 +3892,29 @@ func (e *TenantDatabaseQuotaExceededFault) ErrorCode() string { return *e.ErrorCodeOverride } func (e *TenantDatabaseQuotaExceededFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + +// The specified DB engine version isn't supported for Aurora Limitless Database. +type UnsupportedDBEngineVersionFault struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *UnsupportedDBEngineVersionFault) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *UnsupportedDBEngineVersionFault) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *UnsupportedDBEngineVersionFault) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "UnsupportedDBEngineVersion" + } + return *e.ErrorCodeOverride +} +func (e *UnsupportedDBEngineVersionFault) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } diff --git a/service/rds/types/types.go b/service/rds/types/types.go index 33d82461c2a..6aa1c3816cf 100644 --- a/service/rds/types/types.go +++ b/service/rds/types/types.go @@ -642,6 +642,9 @@ type DBCluster struct { // The latest time to which a database can be restored with point-in-time restore. LatestRestorableTime *time.Time + // The details for Aurora Limitless Database. + LimitlessDatabase *LimitlessDatabase + // Indicates whether an Aurora DB cluster has in-cluster write forwarding enabled, // not enabled, requested, or is in the process of enabling it. LocalWriteForwardingStatus LocalWriteForwardingStatus @@ -2294,6 +2297,52 @@ type DBSecurityGroupMembership struct { noSmithyDocumentSerde } +type DBShardGroup struct { + + // Specifies whether to create standby instances for the DB shard group. Valid + // values are the following: + // - 0 - Creates a single, primary DB instance for each physical shard. This is + // the default value, and the only one supported for the preview. + // - 1 - Creates a primary DB instance and a standby instance in a different + // Availability Zone (AZ) for each physical shard. + // - 2 - Creates a primary DB instance and two standby instances in different + // AZs for each physical shard. + ComputeRedundancy *int32 + + // The name of the primary DB cluster for the DB shard group. 
+ DBClusterIdentifier *string + + // The name of the DB shard group. + DBShardGroupIdentifier *string + + // The Amazon Web Services Region-unique, immutable identifier for the DB shard + // group. + DBShardGroupResourceId *string + + // The connection endpoint for the DB shard group. + Endpoint *string + + // The maximum capacity of the DB shard group in Aurora capacity units (ACUs). + MaxACU *float64 + + // Indicates whether the DB shard group is publicly accessible. When the DB shard + // group is publicly accessible, its Domain Name System (DNS) endpoint resolves to + // the private IP address from within the DB shard group's virtual private cloud + // (VPC). It resolves to the public IP address from outside of the DB shard group's + // VPC. Access to the DB shard group is ultimately controlled by the security group + // it uses. That public access isn't permitted if the security group assigned to + // the DB shard group doesn't permit it. When the DB shard group isn't publicly + // accessible, it is an internal DB shard group with a DNS name that resolves to a + // private IP address. For more information, see CreateDBShardGroup . This setting + // is only for Aurora Limitless Database. + PubliclyAccessible *bool + + // The status of the DB shard group. + Status *string + + noSmithyDocumentSerde +} + // Contains the details of an Amazon RDS DB snapshot. This data type is used as a // response element in the DescribeDBSnapshots action. type DBSnapshot struct { @@ -3059,6 +3108,19 @@ type IssueDetails struct { noSmithyDocumentSerde } +// Contains details for Aurora Limitless Database. +type LimitlessDatabase struct { + + // The minimum required capacity for Aurora Limitless Database in Aurora capacity + // units (ACUs). + MinRequiredACU *float64 + + // The status of Aurora Limitless Database. + Status LimitlessDatabaseStatus + + noSmithyDocumentSerde +} + // Contains the secret managed by RDS in Amazon Web Services Secrets Manager for // the master user password. 
For more information, see Password management with // Amazon Web Services Secrets Manager (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) diff --git a/service/rds/validators.go b/service/rds/validators.go index 7fda8ce8885..923bed22480 100644 --- a/service/rds/validators.go +++ b/service/rds/validators.go @@ -510,6 +510,26 @@ func (m *validateOpCreateDBSecurityGroup) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpCreateDBShardGroup struct { +} + +func (*validateOpCreateDBShardGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateDBShardGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateDBShardGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateDBShardGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpCreateDBSnapshot struct { } @@ -870,6 +890,26 @@ func (m *validateOpDeleteDBSecurityGroup) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpDeleteDBShardGroup struct { +} + +func (*validateOpDeleteDBShardGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteDBShardGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteDBShardGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteDBShardGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteDBSnapshot struct { } @@ -1470,6 +1510,26 @@ func (m *validateOpDescribeDBSecurityGroups) HandleInitialize(ctx context.Contex return next.HandleInitialize(ctx, in) } +type validateOpDescribeDBShardGroups struct { +} + +func (*validateOpDescribeDBShardGroups) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDescribeDBShardGroups) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DescribeDBShardGroupsInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDescribeDBShardGroupsInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDescribeDBSnapshotAttributes struct { } @@ -2250,6 +2310,26 @@ func (m *validateOpModifyDBRecommendation) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpModifyDBShardGroup struct { +} + +func (*validateOpModifyDBShardGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpModifyDBShardGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*ModifyDBShardGroupInput) + if !ok { + return out, metadata, 
fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpModifyDBShardGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpModifyDBSnapshotAttribute struct { } @@ -2470,6 +2550,26 @@ func (m *validateOpRebootDBInstance) HandleInitialize(ctx context.Context, in mi return next.HandleInitialize(ctx, in) } +type validateOpRebootDBShardGroup struct { +} + +func (*validateOpRebootDBShardGroup) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpRebootDBShardGroup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*RebootDBShardGroupInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpRebootDBShardGroupInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpRegisterDBProxyTargets struct { } @@ -3090,6 +3190,10 @@ func addOpCreateDBSecurityGroupValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpCreateDBSecurityGroup{}, middleware.After) } +func addOpCreateDBShardGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateDBShardGroup{}, middleware.After) +} + func addOpCreateDBSnapshotValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpCreateDBSnapshot{}, middleware.After) } @@ -3162,6 +3266,10 @@ func addOpDeleteDBSecurityGroupValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpDeleteDBSecurityGroup{}, middleware.After) } +func addOpDeleteDBShardGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteDBShardGroup{}, middleware.After) +} + func addOpDeleteDBSnapshotValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteDBSnapshot{}, middleware.After) } @@ -3282,6 +3390,10 @@ func addOpDescribeDBSecurityGroupsValidationMiddleware(stack *middleware.Stack) return stack.Initialize.Add(&validateOpDescribeDBSecurityGroups{}, middleware.After) } +func addOpDescribeDBShardGroupsValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDescribeDBShardGroups{}, middleware.After) +} + func addOpDescribeDBSnapshotAttributesValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDescribeDBSnapshotAttributes{}, middleware.After) } @@ -3438,6 +3550,10 @@ func addOpModifyDBRecommendationValidationMiddleware(stack *middleware.Stack) er return stack.Initialize.Add(&validateOpModifyDBRecommendation{}, middleware.After) } +func addOpModifyDBShardGroupValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpModifyDBShardGroup{}, middleware.After) +} + func addOpModifyDBSnapshotAttributeValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpModifyDBSnapshotAttribute{}, middleware.After) } @@ -3482,6 +3598,10 @@ func addOpRebootDBInstanceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpRebootDBInstance{}, middleware.After) } +func addOpRebootDBShardGroupValidationMiddleware(stack *middleware.Stack) error { + return 
stack.Initialize.Add(&validateOpRebootDBShardGroup{}, middleware.After) +} + func addOpRegisterDBProxyTargetsValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpRegisterDBProxyTargets{}, middleware.After) } @@ -4168,6 +4288,27 @@ func validateOpCreateDBSecurityGroupInput(v *CreateDBSecurityGroupInput) error { } } +func validateOpCreateDBShardGroupInput(v *CreateDBShardGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateDBShardGroupInput"} + if v.DBShardGroupIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DBShardGroupIdentifier")) + } + if v.DBClusterIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DBClusterIdentifier")) + } + if v.MaxACU == nil { + invalidParams.Add(smithy.NewErrParamRequired("MaxACU")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpCreateDBSnapshotInput(v *CreateDBSnapshotInput) error { if v == nil { return nil @@ -4477,6 +4618,21 @@ func validateOpDeleteDBSecurityGroupInput(v *DeleteDBSecurityGroupInput) error { } } +func validateOpDeleteDBShardGroupInput(v *DeleteDBShardGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteDBShardGroupInput"} + if v.DBShardGroupIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DBShardGroupIdentifier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteDBSnapshotInput(v *DeleteDBSnapshotInput) error { if v == nil { return nil @@ -4990,6 +5146,23 @@ func validateOpDescribeDBSecurityGroupsInput(v *DescribeDBSecurityGroupsInput) e } } +func validateOpDescribeDBShardGroupsInput(v *DescribeDBShardGroupsInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DescribeDBShardGroupsInput"} + if v.Filters != nil { + if err := validateFilterList(v.Filters); err != nil { + invalidParams.AddNested("Filters", err.(smithy.InvalidParamsError)) + } + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDescribeDBSnapshotAttributesInput(v *DescribeDBSnapshotAttributesInput) error { if v == nil { return nil @@ -5656,6 +5829,21 @@ func validateOpModifyDBRecommendationInput(v *ModifyDBRecommendationInput) error } } +func validateOpModifyDBShardGroupInput(v *ModifyDBShardGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "ModifyDBShardGroupInput"} + if v.DBShardGroupIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DBShardGroupIdentifier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpModifyDBSnapshotAttributeInput(v *ModifyDBSnapshotAttributeInput) error { if v == nil { return nil @@ -5835,6 +6023,21 @@ func validateOpRebootDBInstanceInput(v *RebootDBInstanceInput) error { } } +func validateOpRebootDBShardGroupInput(v *RebootDBShardGroupInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "RebootDBShardGroupInput"} + if v.DBShardGroupIdentifier == nil { + invalidParams.Add(smithy.NewErrParamRequired("DBShardGroupIdentifier")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpRegisterDBProxyTargetsInput(v *RegisterDBProxyTargetsInput) error { if v == nil { return nil diff --git 
a/service/storagegateway/api_op_DescribeTapes.go b/service/storagegateway/api_op_DescribeTapes.go index d078d8e5c0f..1601dce29af 100644 --- a/service/storagegateway/api_op_DescribeTapes.go +++ b/service/storagegateway/api_op_DescribeTapes.go @@ -12,10 +12,15 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a description of the specified Amazon Resource Name (ARN) of virtual -// tapes. If a TapeARN is not specified, returns a description of all virtual -// tapes associated with the specified gateway. This operation is only supported in -// the tape gateway type. +// Returns a description of virtual tapes that correspond to the specified Amazon +// Resource Names (ARNs). If TapeARN is not specified, returns a description of +// the virtual tapes associated with the specified gateway. This operation is only +// supported for the tape gateway type. The operation supports pagination. By +// default, the operation returns a maximum of 100 tapes. You can optionally +// specify the Limit field in the body to limit the number of tapes in the +// response. If the number of tapes returned in the response is truncated, the +// response includes a Marker field. You can use this Marker value in your +// subsequent request to retrieve the next set of tapes. func (c *Client) DescribeTapes(ctx context.Context, params *DescribeTapesInput, optFns ...func(*Options)) (*DescribeTapesOutput, error) { if params == nil { params = &DescribeTapesInput{} diff --git a/service/storagegateway/api_op_NotifyWhenUploaded.go b/service/storagegateway/api_op_NotifyWhenUploaded.go index 0081e637c84..271b1962416 100644 --- a/service/storagegateway/api_op_NotifyWhenUploaded.go +++ b/service/storagegateway/api_op_NotifyWhenUploaded.go @@ -12,7 +12,7 @@ import ( ) // Sends you notification through CloudWatch Events when all files written to your -// file share have been uploaded to S3. Amazon S3. Storage Gateway can send a +// file share have been uploaded to Amazon S3. Storage Gateway can send a // notification through Amazon CloudWatch Events when all files written to your // file share up to that point in time have been uploaded to Amazon S3. These files // include files written to the file share up to the time that you make a request diff --git a/service/storagegateway/api_op_RefreshCache.go b/service/storagegateway/api_op_RefreshCache.go index 7cc6c2fda1b..4a0487ceb2d 100644 --- a/service/storagegateway/api_op_RefreshCache.go +++ b/service/storagegateway/api_op_RefreshCache.go @@ -19,19 +19,19 @@ import ( // inventory of the objects in the S3 bucket. This operation is only supported in // the S3 File Gateway types. You can subscribe to be notified through an Amazon // CloudWatch event when your RefreshCache operation completes. For more -// information, see Getting notified about file operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification) -// in the Storage Gateway User Guide. This operation is Only supported for S3 File -// Gateways. When this API is called, it only initiates the refresh operation. When -// the API call completes and returns a success code, it doesn't necessarily mean -// that the file refresh has completed. You should use the refresh-complete -// notification to determine that the operation has completed before you check for -// new files on the gateway file share. You can subscribe to be notified through a -// CloudWatch event when your RefreshCache operation completes.
Throttle limit: -// This API is asynchronous, so the gateway will accept no more than two refreshes -// at any time. We recommend using the refresh-complete CloudWatch event -// notification before issuing additional requests. For more information, see -// Getting notified about file operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification) -// in the Storage Gateway User Guide. +// information, see Getting notified about file operations (https://docs.aws.amazon.com/filegateway/latest/files3/monitoring-file-gateway.html#get-notification) +// in the Amazon S3 File Gateway User Guide. This operation is only supported for +// S3 File Gateways. When this API is called, it only initiates the refresh +// operation. When the API call completes and returns a success code, it doesn't +// necessarily mean that the file refresh has completed. You should use the +// refresh-complete notification to determine that the operation has completed +// before you check for new files on the gateway file share. You can subscribe to +// be notified through a CloudWatch event when your RefreshCache operation +// completes. Throttle limit: This API is asynchronous, so the gateway will accept +// no more than two refreshes at any time. We recommend using the refresh-complete +// CloudWatch event notification before issuing additional requests. For more +// information, see Getting notified about file operations (https://docs.aws.amazon.com/filegateway/latest/files3/monitoring-file-gateway.html#get-notification) +// in the Amazon S3 File Gateway User Guide. // - Wait at least 60 seconds between consecutive RefreshCache API requests. // - If you invoke the RefreshCache API when two requests are already being // processed, any new request will cause an InvalidGatewayRequestException error @@ -39,8 +39,8 @@ import ( // // The S3 bucket name does not need to be included when entering the list of // folders in the FolderList parameter. For more information, see Getting notified -// about file operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification) -// in the Storage Gateway User Guide. +// about file operations (https://docs.aws.amazon.com/filegateway/latest/files3/monitoring-file-gateway.html#get-notification) +// in the Amazon S3 File Gateway User Guide. func (c *Client) RefreshCache(ctx context.Context, params *RefreshCacheInput, optFns ...func(*Options)) (*RefreshCacheOutput, error) { if params == nil { params = &RefreshCacheInput{} diff --git a/service/storagegateway/api_op_ShutdownGateway.go b/service/storagegateway/api_op_ShutdownGateway.go index 1363992af29..30435ffb05b 100644 --- a/service/storagegateway/api_op_ShutdownGateway.go +++ b/service/storagegateway/api_op_ShutdownGateway.go @@ -11,21 +11,22 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Shuts down a gateway. To specify which gateway to shut down, use the Amazon -// Resource Name (ARN) of the gateway in the body of your request. The operation -// shuts down the gateway service component running in the gateway's virtual -// machine (VM) and not the host VM. If you want to shut down the VM, it is -// recommended that you first shut down the gateway component in the VM to avoid -// unpredictable conditions. After the gateway is shutdown, you cannot call any -// other API except StartGateway , DescribeGatewayInformation , and ListGateways . -// For more information, see ActivateGateway .
Your applications cannot read from -// or write to the gateway's storage volumes, and there are no snapshots taken. -// When you make a shutdown request, you will get a 200 OK success response -// immediately. However, it might take some time for the gateway to shut down. You -// can call the DescribeGatewayInformation API to check the status. For more -// information, see ActivateGateway . If do not intend to use the gateway again, -// you must delete the gateway (using DeleteGateway ) to no longer pay software -// charges associated with the gateway. +// Shuts down a Tape Gateway or Volume Gateway. To specify which gateway to shut +// down, use the Amazon Resource Name (ARN) of the gateway in the body of your +// request. This API action cannot be used to shut down S3 File Gateway or FSx File +// Gateway. The operation shuts down the gateway service component running in the +// gateway's virtual machine (VM) and not the host VM. If you want to shut down the +// VM, it is recommended that you first shut down the gateway component in the VM +// to avoid unpredictable conditions. After the gateway is shut down, you cannot +// call any other API except StartGateway , DescribeGatewayInformation , and +// ListGateways . For more information, see ActivateGateway . Your applications +// cannot read from or write to the gateway's storage volumes, and there are no +// snapshots taken. When you make a shutdown request, you will get a 200 OK +// success response immediately. However, it might take some time for the gateway +// to shut down. You can call the DescribeGatewayInformation API to check the +// status. For more information, see ActivateGateway . If you do not intend to use +// the gateway again, you must delete the gateway (using DeleteGateway ) to no +// longer pay software charges associated with the gateway.
func (c *Client) ShutdownGateway(ctx context.Context, params *ShutdownGatewayInput, optFns ...func(*Options)) (*ShutdownGatewayOutput, error) { if params == nil { params = &ShutdownGatewayInput{} diff --git a/service/storagegateway/deserializers.go b/service/storagegateway/deserializers.go index 6cba8ce41d0..fa78b0060a7 100644 --- a/service/storagegateway/deserializers.go +++ b/service/storagegateway/deserializers.go @@ -11964,6 +11964,15 @@ func awsAwsjson11_deserializeDocumentGatewayInfo(v **types.GatewayInfo, value in for key, value := range shape { switch key { + case "DeprecationDate": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected DeprecationDate to be of type string, got %T instead", value) + } + sv.DeprecationDate = ptr.String(jtv) + } + case "Ec2InstanceId": if value != nil { jtv, ok := value.(string) @@ -12045,6 +12054,15 @@ func awsAwsjson11_deserializeDocumentGatewayInfo(v **types.GatewayInfo, value in sv.HostEnvironmentId = ptr.String(jtv) } + case "SoftwareVersion": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected SoftwareVersion to be of type string, got %T instead", value) + } + sv.SoftwareVersion = ptr.String(jtv) + } + default: _, _ = key, value diff --git a/service/storagegateway/endpoints.go b/service/storagegateway/endpoints.go index 037dfd38855..91d7adf8dfd 100644 --- a/service/storagegateway/endpoints.go +++ b/service/storagegateway/endpoints.go @@ -366,7 +366,7 @@ func (r *resolver) ResolveEndpoint( } } if _UseFIPS == true { - if true == _PartitionResult.SupportsFIPS { + if _PartitionResult.SupportsFIPS == true { uriString := func() string { var out strings.Builder out.WriteString("https://storagegateway-fips.") diff --git a/service/storagegateway/types/types.go b/service/storagegateway/types/types.go index ec1c0508ec2..4152a7f7353 100644 --- a/service/storagegateway/types/types.go +++ b/service/storagegateway/types/types.go @@ -387,6 +387,10 @@ type FileSystemAssociationSummary struct { // Describes a gateway object. type GatewayInfo struct { + // Date after which this gateway will not receive software updates for new + // features and bug fixes. + DeprecationDate *string + // The ID of the Amazon EC2 instance that was used to launch the gateway. Ec2InstanceId *string @@ -419,6 +423,9 @@ type GatewayInfo struct { // format depends on the host environment type. HostEnvironmentId *string + // The version number of the software running on the gateway appliance. + SoftwareVersion *string + noSmithyDocumentSerde }
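To make the new Aurora Limitless Database surface in this change concrete, the following is a minimal sketch of calling the CreateDBShardGroup operation that the validators above guard, and of picking out the new MaxDBShardGroupLimitReached fault with errors.As. The identifiers, the ACU value, and the assumption that the operation output mirrors the DBShardGroup fields shown above (for example Endpoint) are illustrative, not taken from this diff.

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rds"
	"github.com/aws/aws-sdk-go-v2/service/rds/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := rds.NewFromConfig(cfg)

	// DBShardGroupIdentifier, DBClusterIdentifier, and MaxACU are the three
	// members required by validateOpCreateDBShardGroupInput; the values here
	// are placeholders for an existing Aurora Limitless Database cluster.
	out, err := client.CreateDBShardGroup(ctx, &rds.CreateDBShardGroupInput{
		DBShardGroupIdentifier: aws.String("example-shard-group"),
		DBClusterIdentifier:    aws.String("example-limitless-cluster"),
		MaxACU:                 aws.Float64(768),
	})
	if err != nil {
		// Modeled faults are concrete error types, so errors.As distinguishes
		// the new per-Region shard group quota error from other failures.
		var quota *types.MaxDBShardGroupLimitReached
		if errors.As(err, &quota) {
			log.Fatalf("DB shard group quota reached: %s", quota.ErrorMessage())
		}
		log.Fatalf("CreateDBShardGroup failed: %v", err)
	}

	// The output is assumed to carry the same fields as the DBShardGroup type
	// above, including the connection endpoint.
	log.Printf("created DB shard group, endpoint: %s", aws.ToString(out.Endpoint))
}

The same errors.As pattern applies to UnsupportedDBEngineVersionFault when the cluster's engine version does not support Aurora Limitless Database.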
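The GatewayInfo and deserializer additions above mean ListGateways now returns DeprecationDate and SoftwareVersion for each gateway. The sketch below, assuming the generated ListGateways paginator and default credentials, prints both fields for every gateway in the account; the log format is illustrative.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/storagegateway"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("load AWS config: %v", err)
	}
	client := storagegateway.NewFromConfig(cfg)

	// ListGateways is paginated, so walk every page with the generated paginator.
	p := storagegateway.NewListGatewaysPaginator(client, &storagegateway.ListGatewaysInput{})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatalf("ListGateways failed: %v", err)
		}
		for _, gw := range page.Gateways {
			// DeprecationDate and SoftwareVersion are the fields added to
			// GatewayInfo in this change; either can be nil for some gateways,
			// in which case aws.ToString yields an empty string.
			log.Printf("%s software=%q deprecation=%q",
				aws.ToString(gw.GatewayARN),
				aws.ToString(gw.SoftwareVersion),
				aws.ToString(gw.DeprecationDate))
		}
	}
}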