From d4103b74479201ef1934bcee5414c15f0bada5f1 Mon Sep 17 00:00:00 2001 From: AWS SDK for Go v2 automation user Date: Tue, 1 Oct 2024 18:42:09 +0000 Subject: [PATCH] Regenerated Clients --- .../2d0a3c49734a405bb3af3f1823a27bc7.json | 8 + .../32666c16416347a4b9e0e586b1e2feab.json | 8 + .../d0ca0869a234425c89af877eac3be86a.json | 8 + .../attributevalue/go_module_metadata.go | 2 +- .../api_op_CreateKnowledgeBase.go | 10 +- service/bedrockagent/api_op_GetDataSource.go | 2 +- .../bedrockagent/api_op_GetIngestionJob.go | 14 +- .../bedrockagent/api_op_GetKnowledgeBase.go | 2 +- .../bedrockagent/api_op_ListIngestionJobs.go | 13 +- .../bedrockagent/api_op_ListKnowledgeBases.go | 5 +- .../bedrockagent/api_op_StartIngestionJob.go | 12 +- .../bedrockagent/api_op_StopIngestionJob.go | 172 ++++++++++++++++++ service/bedrockagent/deserializers.go | 167 +++++++++++++++++ service/bedrockagent/generated.json | 1 + service/bedrockagent/serializers.go | 89 +++++++++ .../snapshot/api_op_StopIngestionJob.go.snap | 41 +++++ service/bedrockagent/snapshot_test.go | 24 +++ service/bedrockagent/types/enums.go | 4 + service/bedrockagent/types/types.go | 78 ++++---- service/bedrockagent/validators.go | 45 +++++ .../api_op_GetRepositoryEndpoint.go | 3 + service/codeartifact/serializers.go | 4 + service/codeartifact/types/enums.go | 19 ++ service/codeartifact/types/types.go | 8 + .../location/internal/endpoints/endpoints.go | 3 + .../memorydb/internal/endpoints/endpoints.go | 3 + .../internal/endpoints/endpoints.go | 3 + service/rds/api_op_CreateDBCluster.go | 9 + service/rds/api_op_CreateDBShardGroup.go | 19 ++ service/rds/api_op_DeleteDBShardGroup.go | 10 + service/rds/api_op_ModifyDBShardGroup.go | 10 + service/rds/api_op_RebootDBShardGroup.go | 10 + service/rds/deserializers.go | 46 +++++ service/rds/serializers.go | 12 ++ service/rds/types/enums.go | 19 ++ service/rds/types/types.go | 14 ++ 36 files changed, 834 insertions(+), 63 deletions(-) create mode 100644 .changelog/2d0a3c49734a405bb3af3f1823a27bc7.json create mode 100644 .changelog/32666c16416347a4b9e0e586b1e2feab.json create mode 100644 .changelog/d0ca0869a234425c89af877eac3be86a.json create mode 100644 service/bedrockagent/api_op_StopIngestionJob.go create mode 100644 service/bedrockagent/snapshot/api_op_StopIngestionJob.go.snap diff --git a/.changelog/2d0a3c49734a405bb3af3f1823a27bc7.json b/.changelog/2d0a3c49734a405bb3af3f1823a27bc7.json new file mode 100644 index 00000000000..d1082429424 --- /dev/null +++ b/.changelog/2d0a3c49734a405bb3af3f1823a27bc7.json @@ -0,0 +1,8 @@ +{ + "id": "2d0a3c49-734a-405b-b3af-3f1823a27bc7", + "type": "feature", + "description": "This release provides additional support for enabling Aurora Limitless Database DB clusters.", + "modules": [ + "service/rds" + ] +} \ No newline at end of file diff --git a/.changelog/32666c16416347a4b9e0e586b1e2feab.json b/.changelog/32666c16416347a4b9e0e586b1e2feab.json new file mode 100644 index 00000000000..2004571c921 --- /dev/null +++ b/.changelog/32666c16416347a4b9e0e586b1e2feab.json @@ -0,0 +1,8 @@ +{ + "id": "32666c16-4163-47a4-b9e0-e586b1e2feab", + "type": "feature", + "description": "This release adds support to stop an ongoing ingestion job using the StopIngestionJob API in Agents for Amazon Bedrock.", + "modules": [ + "service/bedrockagent" + ] +} \ No newline at end of file diff --git a/.changelog/d0ca0869a234425c89af877eac3be86a.json b/.changelog/d0ca0869a234425c89af877eac3be86a.json new file mode 100644 index 00000000000..5a7b994f9a8 --- /dev/null +++ 
b/.changelog/d0ca0869a234425c89af877eac3be86a.json @@ -0,0 +1,8 @@ +{ + "id": "d0ca0869-a234-425c-89af-877eac3be86a", + "type": "feature", + "description": "Add support for the dual stack endpoints.", + "modules": [ + "service/codeartifact" + ] +} \ No newline at end of file diff --git a/feature/dynamodbstreams/attributevalue/go_module_metadata.go b/feature/dynamodbstreams/attributevalue/go_module_metadata.go index a5c4c938ecd..113a2f3cd9a 100644 --- a/feature/dynamodbstreams/attributevalue/go_module_metadata.go +++ b/feature/dynamodbstreams/attributevalue/go_module_metadata.go @@ -3,4 +3,4 @@ package attributevalue // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.24" +const goModuleVersion = "1.15.8" diff --git a/service/bedrockagent/api_op_CreateKnowledgeBase.go b/service/bedrockagent/api_op_CreateKnowledgeBase.go index 45a7e2aadd0..df5454fbf52 100644 --- a/service/bedrockagent/api_op_CreateKnowledgeBase.go +++ b/service/bedrockagent/api_op_CreateKnowledgeBase.go @@ -11,10 +11,10 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a knowledge base that contains data sources from which information can -// be queried and used by LLMs. To create a knowledge base, you must first set up -// your data sources and configure a supported vector store. For more information, -// see [Set up your data for ingestion]. +// Creates a knowledge base. A knowledge base contains your data sources so that +// Large Language Models (LLMs) can use your data. To create a knowledge base, you +// must first set up your data sources and configure a supported vector store. For +// more information, see [Set up a knowledge base]. // // If you prefer to let Amazon Bedrock create and manage a vector store for you in // Amazon OpenSearch Service, use the console. For more information, see [Create a knowledge base]. @@ -42,10 +42,10 @@ import ( // - For a Redis Enterprise Cloud database, use the // redisEnterpriseCloudConfiguration object. For more information, see [Create a vector store in Redis Enterprise Cloud]. // -// [Set up your data for ingestion]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup.html // [Create a knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-create // [Create a vector store in Amazon OpenSearch Service]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-oss.html // [Create a vector store in Redis Enterprise Cloud]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-redis.html +// [Set up a knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowlege-base-prereq.html // [Create a vector store in Amazon Aurora]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-rds.html // [Create a vector store in Pinecone]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-pinecone.html func (c *Client) CreateKnowledgeBase(ctx context.Context, params *CreateKnowledgeBaseInput, optFns ...func(*Options)) (*CreateKnowledgeBaseOutput, error) { diff --git a/service/bedrockagent/api_op_GetDataSource.go b/service/bedrockagent/api_op_GetDataSource.go index fa0168a23df..2aee15da8e4 100644 --- a/service/bedrockagent/api_op_GetDataSource.go +++ b/service/bedrockagent/api_op_GetDataSource.go @@ -34,7 +34,7 @@ type GetDataSourceInput struct { // This member is required. DataSourceId *string - // The unique identifier of the knowledge base that the data source was added to. 
+ // The unique identifier of the knowledge base for the data source. // // This member is required. KnowledgeBaseId *string diff --git a/service/bedrockagent/api_op_GetIngestionJob.go b/service/bedrockagent/api_op_GetIngestionJob.go index 9e83347874e..4ce9ff81f91 100644 --- a/service/bedrockagent/api_op_GetIngestionJob.go +++ b/service/bedrockagent/api_op_GetIngestionJob.go @@ -11,8 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets information about a ingestion job, in which a data source is added to a -// knowledge base. +// Gets information about a data ingestion job. Data sources are ingested into +// your knowledge base so that Large Lanaguage Models (LLMs) can use your data. func (c *Client) GetIngestionJob(ctx context.Context, params *GetIngestionJobInput, optFns ...func(*Options)) (*GetIngestionJobOutput, error) { if params == nil { params = &GetIngestionJobInput{} @@ -30,17 +30,19 @@ func (c *Client) GetIngestionJob(ctx context.Context, params *GetIngestionJobInp type GetIngestionJobInput struct { - // The unique identifier of the data source in the ingestion job. + // The unique identifier of the data source for the data ingestion job you want to + // get information on. // // This member is required. DataSourceId *string - // The unique identifier of the ingestion job. + // The unique identifier of the data ingestion job you want to get information on. // // This member is required. IngestionJobId *string - // The unique identifier of the knowledge base for which the ingestion job applies. + // The unique identifier of the knowledge base for the data ingestion job you want + // to get information on. // // This member is required. KnowledgeBaseId *string @@ -50,7 +52,7 @@ type GetIngestionJobInput struct { type GetIngestionJobOutput struct { - // Contains details about the ingestion job. + // Contains details about the data ingestion job. // // This member is required. IngestionJob *types.IngestionJob diff --git a/service/bedrockagent/api_op_GetKnowledgeBase.go b/service/bedrockagent/api_op_GetKnowledgeBase.go index 808b21179dc..bd20056b011 100644 --- a/service/bedrockagent/api_op_GetKnowledgeBase.go +++ b/service/bedrockagent/api_op_GetKnowledgeBase.go @@ -29,7 +29,7 @@ func (c *Client) GetKnowledgeBase(ctx context.Context, params *GetKnowledgeBaseI type GetKnowledgeBaseInput struct { - // The unique identifier of the knowledge base for which to get information. + // The unique identifier of the knowledge base you want to get information on. // // This member is required. KnowledgeBaseId *string diff --git a/service/bedrockagent/api_op_ListIngestionJobs.go b/service/bedrockagent/api_op_ListIngestionJobs.go index 34c2b99ef24..6325485917d 100644 --- a/service/bedrockagent/api_op_ListIngestionJobs.go +++ b/service/bedrockagent/api_op_ListIngestionJobs.go @@ -11,7 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the ingestion jobs for a data source and information about each of them. +// Lists the data ingestion jobs for a data source. The list also includes +// information about each job. func (c *Client) ListIngestionJobs(ctx context.Context, params *ListIngestionJobsInput, optFns ...func(*Options)) (*ListIngestionJobsOutput, error) { if params == nil { params = &ListIngestionJobsInput{} @@ -29,17 +30,17 @@ func (c *Client) ListIngestionJobs(ctx context.Context, params *ListIngestionJob type ListIngestionJobsInput struct { - // The unique identifier of the data source for which to return ingestion jobs. 
+ // The unique identifier of the data source for the list of data ingestion jobs. // // This member is required. DataSourceId *string - // The unique identifier of the knowledge base for which to return ingestion jobs. + // The unique identifier of the knowledge base for the list of data ingestion jobs. // // This member is required. KnowledgeBaseId *string - // Contains a definition of a filter for which to filter the results. + // Contains information about the filters for filtering the data. Filters []types.IngestionJobFilter // The maximum number of results to return in the response. If the total number of @@ -53,7 +54,7 @@ type ListIngestionJobsInput struct { // this field to return the next batch of results. NextToken *string - // Contains details about how to sort the results. + // Contains details about how to sort the data. SortBy *types.IngestionJobSortBy noSmithyDocumentSerde @@ -61,7 +62,7 @@ type ListIngestionJobsInput struct { type ListIngestionJobsOutput struct { - // A list of objects, each of which contains information about an ingestion job. + // A list of data ingestion jobs with information about each job. // // This member is required. IngestionJobSummaries []types.IngestionJobSummary diff --git a/service/bedrockagent/api_op_ListKnowledgeBases.go b/service/bedrockagent/api_op_ListKnowledgeBases.go index 2a8e4e9e8ce..1cc68a4f724 100644 --- a/service/bedrockagent/api_op_ListKnowledgeBases.go +++ b/service/bedrockagent/api_op_ListKnowledgeBases.go @@ -11,7 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the knowledge bases in an account and information about each of them. +// Lists the knowledge bases in an account. The list also includesinformation +// about each knowledge base. func (c *Client) ListKnowledgeBases(ctx context.Context, params *ListKnowledgeBasesInput, optFns ...func(*Options)) (*ListKnowledgeBasesOutput, error) { if params == nil { params = &ListKnowledgeBasesInput{} @@ -45,7 +46,7 @@ type ListKnowledgeBasesInput struct { type ListKnowledgeBasesOutput struct { - // A list of objects, each of which contains information about a knowledge base. + // A list of knowledge bases with information about each knowledge base. // // This member is required. KnowledgeBaseSummaries []types.KnowledgeBaseSummary diff --git a/service/bedrockagent/api_op_StartIngestionJob.go b/service/bedrockagent/api_op_StartIngestionJob.go index 6805a1f71da..e7637bd34a1 100644 --- a/service/bedrockagent/api_op_StartIngestionJob.go +++ b/service/bedrockagent/api_op_StartIngestionJob.go @@ -11,7 +11,8 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Begins an ingestion job, in which a data source is added to a knowledge base. +// Begins a data ingestion job. Data sources are ingested into your knowledge base +// so that Large Language Models (LLMs) can use your data. func (c *Client) StartIngestionJob(ctx context.Context, params *StartIngestionJobInput, optFns ...func(*Options)) (*StartIngestionJobOutput, error) { if params == nil { params = &StartIngestionJobInput{} @@ -29,12 +30,13 @@ func (c *Client) StartIngestionJob(ctx context.Context, params *StartIngestionJo type StartIngestionJobInput struct { - // The unique identifier of the data source to ingest. + // The unique identifier of the data source you want to ingest into your knowledge + // base. // // This member is required. DataSourceId *string - // The unique identifier of the knowledge base to which to add the data source. 
+ // The unique identifier of the knowledge base for the data ingestion job. // // This member is required. KnowledgeBaseId *string @@ -46,7 +48,7 @@ type StartIngestionJobInput struct { // [Ensuring idempotency]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html ClientToken *string - // A description of the ingestion job. + // A description of the data ingestion job. Description *string noSmithyDocumentSerde @@ -54,7 +56,7 @@ type StartIngestionJobInput struct { type StartIngestionJobOutput struct { - // An object containing information about the ingestion job. + // Contains information about the data ingestion job. // // This member is required. IngestionJob *types.IngestionJob diff --git a/service/bedrockagent/api_op_StopIngestionJob.go b/service/bedrockagent/api_op_StopIngestionJob.go new file mode 100644 index 00000000000..4cdf08ae1c1 --- /dev/null +++ b/service/bedrockagent/api_op_StopIngestionJob.go @@ -0,0 +1,172 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package bedrockagent + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Stops a currently running data ingestion job. You can send a StartIngestionJob +// request again to ingest the rest of your data when you are ready. +func (c *Client) StopIngestionJob(ctx context.Context, params *StopIngestionJobInput, optFns ...func(*Options)) (*StopIngestionJobOutput, error) { + if params == nil { + params = &StopIngestionJobInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "StopIngestionJob", params, optFns, c.addOperationStopIngestionJobMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*StopIngestionJobOutput) + out.ResultMetadata = metadata + return out, nil +} + +type StopIngestionJobInput struct { + + // The unique identifier of the data source for the data ingestion job you want to + // stop. + // + // This member is required. + DataSourceId *string + + // The unique identifier of the data ingestion job you want to stop. + // + // This member is required. + IngestionJobId *string + + // The unique identifier of the knowledge base for the data ingestion job you want + // to stop. + // + // This member is required. + KnowledgeBaseId *string + + noSmithyDocumentSerde +} + +type StopIngestionJobOutput struct { + + // Contains information about the stopped data ingestion job. + // + // This member is required. + IngestionJob *types.IngestionJob + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationStopIngestionJobMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestjson1_serializeOpStopIngestionJob{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestjson1_deserializeOpStopIngestionJob{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "StopIngestionJob"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addSpanRetryLoop(stack, options); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addTimeOffsetBuild(stack, c); err != nil { + return err + } + if err = addUserAgentRetryMode(stack, options); err != nil { + return err + } + if err = addOpStopIngestionJobValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStopIngestionJob(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSpanInitializeStart(stack); err != nil { + return err + } + if err = addSpanInitializeEnd(stack); err != nil { + return err + } + if err = addSpanBuildRequestStart(stack); err != nil { + return err + } + if err = addSpanBuildRequestEnd(stack); err != nil { + return err + } + return nil +} + +func newServiceMetadataMiddleware_opStopIngestionJob(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "StopIngestionJob", + } +} diff --git a/service/bedrockagent/deserializers.go b/service/bedrockagent/deserializers.go index 704934939e7..5b635622497 100644 --- a/service/bedrockagent/deserializers.go +++ b/service/bedrockagent/deserializers.go @@ -9266,6 +9266,173 @@ func awsRestjson1_deserializeOpDocumentStartIngestionJobOutput(v **StartIngestio return nil } +type awsRestjson1_deserializeOpStopIngestionJob struct { 
+} + +func (*awsRestjson1_deserializeOpStopIngestionJob) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestjson1_deserializeOpStopIngestionJob) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + _, span := tracing.StartSpan(ctx, "OperationDeserializer") + endTimer := startMetricTimer(ctx, "client.call.deserialization_duration") + defer endTimer() + defer span.End() + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestjson1_deserializeOpErrorStopIngestionJob(response, &metadata) + } + output := &StopIngestionJobOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsRestjson1_deserializeOpDocumentStopIngestionJobOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + span.End() + return out, metadata, err +} + +func awsRestjson1_deserializeOpErrorStopIngestionJob(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + if len(headerCode) != 0 { + errorCode = restjson.SanitizeErrorCode(headerCode) + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + jsonCode, message, err := restjson.GetErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if len(headerCode) == 0 && len(jsonCode) != 0 { + errorCode = restjson.SanitizeErrorCode(jsonCode) + } + if len(message) != 0 { + errorMessage = message + } + + switch { + case strings.EqualFold("AccessDeniedException", errorCode): + return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody) + + case strings.EqualFold("ConflictException", errorCode): + return awsRestjson1_deserializeErrorConflictException(response, errorBody) + + case strings.EqualFold("InternalServerException", errorCode): + return 
awsRestjson1_deserializeErrorInternalServerException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsRestjson1_deserializeErrorResourceNotFoundException(response, errorBody) + + case strings.EqualFold("ThrottlingException", errorCode): + return awsRestjson1_deserializeErrorThrottlingException(response, errorBody) + + case strings.EqualFold("ValidationException", errorCode): + return awsRestjson1_deserializeErrorValidationException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestjson1_deserializeOpDocumentStopIngestionJobOutput(v **StopIngestionJobOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *StopIngestionJobOutput + if *v == nil { + sv = &StopIngestionJobOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "ingestionJob": + if err := awsRestjson1_deserializeDocumentIngestionJob(&sv.IngestionJob, value); err != nil { + return err + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + type awsRestjson1_deserializeOpTagResource struct { } diff --git a/service/bedrockagent/generated.json b/service/bedrockagent/generated.json index c516c308deb..e64e6c90664 100644 --- a/service/bedrockagent/generated.json +++ b/service/bedrockagent/generated.json @@ -58,6 +58,7 @@ "api_op_PrepareAgent.go", "api_op_PrepareFlow.go", "api_op_StartIngestionJob.go", + "api_op_StopIngestionJob.go", "api_op_TagResource.go", "api_op_UntagResource.go", "api_op_UpdateAgent.go", diff --git a/service/bedrockagent/serializers.go b/service/bedrockagent/serializers.go index 70ed15948ba..f1bff887c82 100644 --- a/service/bedrockagent/serializers.go +++ b/service/bedrockagent/serializers.go @@ -4606,6 +4606,95 @@ func awsRestjson1_serializeOpDocumentStartIngestionJobInput(v *StartIngestionJob return nil } +type awsRestjson1_serializeOpStopIngestionJob struct { +} + +func (*awsRestjson1_serializeOpStopIngestionJob) ID() string { + return "OperationSerializer" +} + +func (m *awsRestjson1_serializeOpStopIngestionJob) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + _, span := tracing.StartSpan(ctx, "OperationSerializer") + endTimer := startMetricTimer(ctx, "client.call.serialization_duration") + defer endTimer() + defer span.End() + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*StopIngestionJobInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/knowledgebases/{knowledgeBaseId}/datasources/{dataSourceId}/ingestionjobs/{ingestionJobId}/stop") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "POST" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, 
request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestjson1_serializeOpHttpBindingsStopIngestionJobInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + endTimer() + span.End() + return next.HandleSerialize(ctx, in) +} +func awsRestjson1_serializeOpHttpBindingsStopIngestionJobInput(v *StopIngestionJobInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if v.DataSourceId == nil || len(*v.DataSourceId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member dataSourceId must not be empty")} + } + if v.DataSourceId != nil { + if err := encoder.SetURI("dataSourceId").String(*v.DataSourceId); err != nil { + return err + } + } + + if v.IngestionJobId == nil || len(*v.IngestionJobId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member ingestionJobId must not be empty")} + } + if v.IngestionJobId != nil { + if err := encoder.SetURI("ingestionJobId").String(*v.IngestionJobId); err != nil { + return err + } + } + + if v.KnowledgeBaseId == nil || len(*v.KnowledgeBaseId) == 0 { + return &smithy.SerializationError{Err: fmt.Errorf("input member knowledgeBaseId must not be empty")} + } + if v.KnowledgeBaseId != nil { + if err := encoder.SetURI("knowledgeBaseId").String(*v.KnowledgeBaseId); err != nil { + return err + } + } + + return nil +} + type awsRestjson1_serializeOpTagResource struct { } diff --git a/service/bedrockagent/snapshot/api_op_StopIngestionJob.go.snap b/service/bedrockagent/snapshot/api_op_StopIngestionJob.go.snap new file mode 100644 index 00000000000..7775017f162 --- /dev/null +++ b/service/bedrockagent/snapshot/api_op_StopIngestionJob.go.snap @@ -0,0 +1,41 @@ +StopIngestionJob + Initialize stack step + spanInitializeStart + RegisterServiceMetadata + legacyEndpointContextSetter + SetLogger + OperationInputValidation + spanInitializeEnd + Serialize stack step + spanBuildRequestStart + setOperationInput + ResolveEndpoint + OperationSerializer + Build stack step + ClientRequestID + ComputeContentLength + UserAgent + AddTimeOffsetMiddleware + RecursionDetection + spanBuildRequestEnd + Finalize stack step + ResolveAuthScheme + GetIdentity + ResolveEndpointV2 + disableHTTPS + ComputePayloadHash + spanRetryLoop + Retry + RetryMetricsHeader + setLegacyContextSigningOptions + Signing + Deserialize stack step + AddRawResponseToMetadata + ErrorCloseResponseBody + CloseResponseBody + ResponseErrorWrapper + RequestIDRetriever + OperationDeserializer + AddTimeOffsetMiddleware + RecordResponseTiming + RequestResponseLogger diff --git a/service/bedrockagent/snapshot_test.go b/service/bedrockagent/snapshot_test.go index 59903078d26..b19b52979e5 100644 --- a/service/bedrockagent/snapshot_test.go +++ b/service/bedrockagent/snapshot_test.go @@ -662,6 +662,18 @@ func TestCheckSnapshot_StartIngestionJob(t *testing.T) { } } +func TestCheckSnapshot_StopIngestionJob(t *testing.T) { + svc := New(Options{}) + _, err := svc.StopIngestionJob(context.Background(), nil, func(o 
*Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return testSnapshot(stack, "StopIngestionJob") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestCheckSnapshot_TagResource(t *testing.T) { svc := New(Options{}) _, err := svc.TagResource(context.Background(), nil, func(o *Options) { @@ -1393,6 +1405,18 @@ func TestUpdateSnapshot_StartIngestionJob(t *testing.T) { } } +func TestUpdateSnapshot_StopIngestionJob(t *testing.T) { + svc := New(Options{}) + _, err := svc.StopIngestionJob(context.Background(), nil, func(o *Options) { + o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error { + return updateSnapshot(stack, "StopIngestionJob") + }) + }) + if _, ok := err.(snapshotOK); !ok && err != nil { + t.Fatal(err) + } +} + func TestUpdateSnapshot_TagResource(t *testing.T) { svc := New(Options{}) _, err := svc.TagResource(context.Background(), nil, func(o *Options) { diff --git a/service/bedrockagent/types/enums.go b/service/bedrockagent/types/enums.go index 19bc0435491..a7fa1e53a61 100644 --- a/service/bedrockagent/types/enums.go +++ b/service/bedrockagent/types/enums.go @@ -460,6 +460,8 @@ const ( IngestionJobStatusInProgress IngestionJobStatus = "IN_PROGRESS" IngestionJobStatusComplete IngestionJobStatus = "COMPLETE" IngestionJobStatusFailed IngestionJobStatus = "FAILED" + IngestionJobStatusStopping IngestionJobStatus = "STOPPING" + IngestionJobStatusStopped IngestionJobStatus = "STOPPED" ) // Values returns all known values for IngestionJobStatus. Note that this can be @@ -472,6 +474,8 @@ func (IngestionJobStatus) Values() []IngestionJobStatus { "IN_PROGRESS", "COMPLETE", "FAILED", + "STOPPING", + "STOPPED", } } diff --git a/service/bedrockagent/types/types.go b/service/bedrockagent/types/types.go index 361d6d6951f..4a6df4889d7 100644 --- a/service/bedrockagent/types/types.go +++ b/service/bedrockagent/types/types.go @@ -1736,8 +1736,8 @@ type InferenceConfiguration struct { noSmithyDocumentSerde } -// Contains details about an ingestion job, which converts a data source to -// embeddings for a vector store in knowledge base. +// Contains details about a data ingestion job. Data sources are ingested into a +// knowledge base so that Large Language Models (LLMs) can use your data. // // This data type is used in the following API operations: // @@ -1752,63 +1752,68 @@ type InferenceConfiguration struct { // [GetIngestionJob response]: https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_GetIngestionJob.html#API_agent_GetIngestionJob_ResponseSyntax type IngestionJob struct { - // The unique identifier of the ingested data source. + // The unique identifier of the data source for the data ingestion job. // // This member is required. DataSourceId *string - // The unique identifier of the ingestion job. + // The unique identifier of the data ingestion job. // // This member is required. IngestionJobId *string - // The unique identifier of the knowledge base to which the data source is being - // added. + // The unique identifier of the knowledge for the data ingestion job. // // This member is required. KnowledgeBaseId *string - // The time at which the ingestion job started. + // The time the data ingestion job started. + // + // If you stop a data ingestion job, the startedAt time is the time the job was + // started before the job was stopped. // // This member is required. StartedAt *time.Time - // The status of the ingestion job. + // The status of the data ingestion job. 
// // This member is required. Status IngestionJobStatus - // The time at which the ingestion job was last updated. + // The time the data ingestion job was last updated. + // + // If you stop a data ingestion job, the updatedAt time is the time the job was + // stopped. // // This member is required. UpdatedAt *time.Time - // The description of the ingestion job. + // The description of the data ingestion job. Description *string - // A list of reasons that the ingestion job failed. + // A list of reasons that the data ingestion job failed. FailureReasons []string - // Contains statistics about the ingestion job. + // Contains statistics about the data ingestion job. Statistics *IngestionJobStatistics noSmithyDocumentSerde } -// Defines a filter by which to filter the results. +// The definition of a filter to filter the data. type IngestionJobFilter struct { - // The attribute by which to filter the results. + // The name of field or attribute to apply the filter. // // This member is required. Attribute IngestionJobFilterAttribute - // The operation to carry out between the attribute and the values. + // The operation to apply to the field or attribute. // // This member is required. Operator IngestionJobFilterOperator - // A list of values for the attribute. + // A list of values that belong to the field or attribute. // // This member is required. Values []string @@ -1816,15 +1821,15 @@ type IngestionJobFilter struct { noSmithyDocumentSerde } -// Parameters by which to sort the results. +// The parameters of sorting the data. type IngestionJobSortBy struct { - // The attribute by which to sort the results. + // The name of field or attribute to apply sorting of data. // // This member is required. Attribute IngestionJobSortByAttribute - // The order by which to sort the results. + // The order for sorting the data. // // This member is required. Order SortOrder @@ -1832,10 +1837,10 @@ type IngestionJobSortBy struct { noSmithyDocumentSerde } -// Contains the statistics for the ingestion job. +// Contains the statistics for the data ingestion job. type IngestionJobStatistics struct { - // The number of source documents that was deleted. + // The number of source documents that were deleted. NumberOfDocumentsDeleted int64 // The number of source documents that failed to be ingested. @@ -1863,43 +1868,43 @@ type IngestionJobStatistics struct { noSmithyDocumentSerde } -// Contains details about an ingestion job. +// Contains details about a data ingestion job. type IngestionJobSummary struct { - // The unique identifier of the data source in the ingestion job. + // The unique identifier of the data source for the data ingestion job. // // This member is required. DataSourceId *string - // The unique identifier of the ingestion job. + // The unique identifier of the data ingestion job. // // This member is required. IngestionJobId *string - // The unique identifier of the knowledge base to which the data source is added. + // The unique identifier of the knowledge base for the data ingestion job. // // This member is required. KnowledgeBaseId *string - // The time at which the ingestion job was started. + // The time the data ingestion job started. // // This member is required. StartedAt *time.Time - // The status of the ingestion job. + // The status of the data ingestion job. // // This member is required. Status IngestionJobStatus - // The time at which the ingestion job was last updated. + // The time the data ingestion job was last updated. // // This member is required. 
UpdatedAt *time.Time - // The description of the ingestion job. + // The description of the data ingestion job. Description *string - // Contains statistics for the ingestion job. + // Contains statistics for the data ingestion job. Statistics *IngestionJobStatistics noSmithyDocumentSerde @@ -1938,7 +1943,7 @@ type IteratorFlowNodeConfiguration struct { // Contains information about a knowledge base. type KnowledgeBase struct { - // The time at which the knowledge base was created. + // The time the knowledge base was created. // // This member is required. CreatedAt *time.Time @@ -1989,7 +1994,7 @@ type KnowledgeBase struct { // This member is required. StorageConfiguration *StorageConfiguration - // The time at which the knowledge base was last updated. + // The time the knowledge base was last updated. // // This member is required. UpdatedAt *time.Time @@ -2003,7 +2008,8 @@ type KnowledgeBase struct { noSmithyDocumentSerde } -// Contains details about the embeddings configuration of the knowledge base. +// Contains details about the vector embeddings configuration of the knowledge +// base. type KnowledgeBaseConfiguration struct { // The type of data that the data source is converted into for the knowledge base. @@ -2011,8 +2017,8 @@ type KnowledgeBaseConfiguration struct { // This member is required. Type KnowledgeBaseType - // Contains details about the embeddings model that'sused to convert the data - // source. + // Contains details about the model that's used to convert the data source into + // vector embeddings. VectorKnowledgeBaseConfiguration *VectorKnowledgeBaseConfiguration noSmithyDocumentSerde @@ -2059,7 +2065,7 @@ type KnowledgeBaseSummary struct { // This member is required. Status KnowledgeBaseStatus - // The time at which the knowledge base was last updated. + // The time the knowledge base was last updated. // // This member is required. 
UpdatedAt *time.Time diff --git a/service/bedrockagent/validators.go b/service/bedrockagent/validators.go index 93da897a2fe..1ca07322b8e 100644 --- a/service/bedrockagent/validators.go +++ b/service/bedrockagent/validators.go @@ -930,6 +930,26 @@ func (m *validateOpStartIngestionJob) HandleInitialize(ctx context.Context, in m return next.HandleInitialize(ctx, in) } +type validateOpStopIngestionJob struct { +} + +func (*validateOpStopIngestionJob) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpStopIngestionJob) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*StopIngestionJobInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpStopIngestionJobInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpTagResource struct { } @@ -1334,6 +1354,10 @@ func addOpStartIngestionJobValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpStartIngestionJob{}, middleware.After) } +func addOpStopIngestionJobValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpStopIngestionJob{}, middleware.After) +} + func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpTagResource{}, middleware.After) } @@ -4144,6 +4168,27 @@ func validateOpStartIngestionJobInput(v *StartIngestionJobInput) error { } } +func validateOpStopIngestionJobInput(v *StopIngestionJobInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "StopIngestionJobInput"} + if v.KnowledgeBaseId == nil { + invalidParams.Add(smithy.NewErrParamRequired("KnowledgeBaseId")) + } + if v.DataSourceId == nil { + invalidParams.Add(smithy.NewErrParamRequired("DataSourceId")) + } + if v.IngestionJobId == nil { + invalidParams.Add(smithy.NewErrParamRequired("IngestionJobId")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpTagResourceInput(v *TagResourceInput) error { if v == nil { return nil diff --git a/service/codeartifact/api_op_GetRepositoryEndpoint.go b/service/codeartifact/api_op_GetRepositoryEndpoint.go index 22af239b4bc..5fdb694ac8c 100644 --- a/service/codeartifact/api_op_GetRepositoryEndpoint.go +++ b/service/codeartifact/api_op_GetRepositoryEndpoint.go @@ -67,6 +67,9 @@ type GetRepositoryEndpointInput struct { // domain that contains the repository. It does not include dashes or spaces. DomainOwner *string + // A string that specifies the type of endpoint. 
+ EndpointType types.EndpointType + noSmithyDocumentSerde } diff --git a/service/codeartifact/serializers.go b/service/codeartifact/serializers.go index 70b2d61c458..f166326149c 100644 --- a/service/codeartifact/serializers.go +++ b/service/codeartifact/serializers.go @@ -2190,6 +2190,10 @@ func awsRestjson1_serializeOpHttpBindingsGetRepositoryEndpointInput(v *GetReposi encoder.SetQuery("domain-owner").String(*v.DomainOwner) } + if len(v.EndpointType) > 0 { + encoder.SetQuery("endpointType").String(string(v.EndpointType)) + } + if len(v.Format) > 0 { encoder.SetQuery("format").String(string(v.Format)) } diff --git a/service/codeartifact/types/enums.go b/service/codeartifact/types/enums.go index 17e7ae1ddba..78671481eb9 100644 --- a/service/codeartifact/types/enums.go +++ b/service/codeartifact/types/enums.go @@ -59,6 +59,25 @@ func (DomainStatus) Values() []DomainStatus { } } +type EndpointType string + +// Enum values for EndpointType +const ( + EndpointTypeDualstack EndpointType = "dualstack" + EndpointTypeIpv4 EndpointType = "ipv4" +) + +// Values returns all known values for EndpointType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (EndpointType) Values() []EndpointType { + return []EndpointType{ + "dualstack", + "ipv4", + } +} + type ExternalConnectionStatus string // Enum values for ExternalConnectionStatus diff --git a/service/codeartifact/types/types.go b/service/codeartifact/types/types.go index 684756b2e64..9a845a9f9aa 100644 --- a/service/codeartifact/types/types.go +++ b/service/codeartifact/types/types.go @@ -631,6 +631,14 @@ type RepositoryExternalConnectionInfo struct { // format, such as a JAR file. // // - nuget : A NuGet package. + // + // - generic : A generic package. + // + // - ruby : A Ruby package. + // + // - swift : A Swift package. + // + // - cargo : A Cargo package. PackageFormat PackageFormat // The status of the external connection of a repository. 
There is one valid diff --git a/service/location/internal/endpoints/endpoints.go b/service/location/internal/endpoints/endpoints.go index 7acc845af0a..86c6e9816ad 100644 --- a/service/location/internal/endpoints/endpoints.go +++ b/service/location/internal/endpoints/endpoints.go @@ -160,6 +160,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-north-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{}, diff --git a/service/memorydb/internal/endpoints/endpoints.go b/service/memorydb/internal/endpoints/endpoints.go index 486fa267692..4e8873c1ade 100644 --- a/service/memorydb/internal/endpoints/endpoints.go +++ b/service/memorydb/internal/endpoints/endpoints.go @@ -169,6 +169,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "eu-south-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{}, diff --git a/service/opensearchserverless/internal/endpoints/endpoints.go b/service/opensearchserverless/internal/endpoints/endpoints.go index 81004422ad4..8a0b87006ea 100644 --- a/service/opensearchserverless/internal/endpoints/endpoints.go +++ b/service/opensearchserverless/internal/endpoints/endpoints.go @@ -142,6 +142,9 @@ var defaultPartitions = endpoints.Partitions{ endpoints.EndpointKey{ Region: "ap-northeast-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ap-northeast-2", + }: endpoints.Endpoint{}, endpoints.EndpointKey{ Region: "ap-south-1", }: endpoints.Endpoint{}, diff --git a/service/rds/api_op_CreateDBCluster.go b/service/rds/api_op_CreateDBCluster.go index 7ab487b41a6..b4983a8ba4d 100644 --- a/service/rds/api_op_CreateDBCluster.go +++ b/service/rds/api_op_CreateDBCluster.go @@ -157,6 +157,15 @@ type CreateDBClusterInput struct { // Valid for Cluster Type: Aurora DB clusters only CharacterSetName *string + // Specifies the scalability mode of the Aurora DB cluster. When set to limitless , + // the cluster operates as an Aurora Limitless Database. When set to standard (the + // default), the cluster uses normal DB instance creation. + // + // Valid for: Aurora DB clusters only + // + // You can't modify this setting after you create the DB cluster. + ClusterScalabilityType types.ClusterScalabilityType + // Specifies whether to copy all tags from the DB cluster to snapshots of the DB // cluster. The default is not to copy them. // diff --git a/service/rds/api_op_CreateDBShardGroup.go b/service/rds/api_op_CreateDBShardGroup.go index fca85ca51a9..0acb3488de9 100644 --- a/service/rds/api_op_CreateDBShardGroup.go +++ b/service/rds/api_op_CreateDBShardGroup.go @@ -6,6 +6,7 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -96,6 +97,15 @@ type CreateDBShardGroupInput struct { // it, the DB shard group is public. PubliclyAccessible *bool + // A list of tags. + // + // For more information, see [Tagging Amazon RDS resources] in the Amazon RDS User Guide or [Tagging Amazon Aurora and Amazon RDS resources] in the Amazon + // Aurora User Guide. 
+ // + // [Tagging Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html + // [Tagging Amazon Aurora and Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html + Tags []types.Tag + noSmithyDocumentSerde } @@ -156,6 +166,15 @@ type CreateDBShardGroupOutput struct { // The status of the DB shard group. Status *string + // A list of tags. + // + // For more information, see [Tagging Amazon RDS resources] in the Amazon RDS User Guide or [Tagging Amazon Aurora and Amazon RDS resources] in the Amazon + // Aurora User Guide. + // + // [Tagging Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html + // [Tagging Amazon Aurora and Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html + TagList []types.Tag + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata diff --git a/service/rds/api_op_DeleteDBShardGroup.go b/service/rds/api_op_DeleteDBShardGroup.go index 12f347f6662..e5516c533c0 100644 --- a/service/rds/api_op_DeleteDBShardGroup.go +++ b/service/rds/api_op_DeleteDBShardGroup.go @@ -6,6 +6,7 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -93,6 +94,15 @@ type DeleteDBShardGroupOutput struct { // The status of the DB shard group. Status *string + // A list of tags. + // + // For more information, see [Tagging Amazon RDS resources] in the Amazon RDS User Guide or [Tagging Amazon Aurora and Amazon RDS resources] in the Amazon + // Aurora User Guide. + // + // [Tagging Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html + // [Tagging Amazon Aurora and Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html + TagList []types.Tag + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata diff --git a/service/rds/api_op_ModifyDBShardGroup.go b/service/rds/api_op_ModifyDBShardGroup.go index 573c44a73c7..f8d4b76b79c 100644 --- a/service/rds/api_op_ModifyDBShardGroup.go +++ b/service/rds/api_op_ModifyDBShardGroup.go @@ -6,6 +6,7 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -114,6 +115,15 @@ type ModifyDBShardGroupOutput struct { // The status of the DB shard group. Status *string + // A list of tags. + // + // For more information, see [Tagging Amazon RDS resources] in the Amazon RDS User Guide or [Tagging Amazon Aurora and Amazon RDS resources] in the Amazon + // Aurora User Guide. + // + // [Tagging Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html + // [Tagging Amazon Aurora and Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html + TagList []types.Tag + // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata diff --git a/service/rds/api_op_RebootDBShardGroup.go b/service/rds/api_op_RebootDBShardGroup.go index 1d370deb79c..84a7fd61edc 100644 --- a/service/rds/api_op_RebootDBShardGroup.go +++ b/service/rds/api_op_RebootDBShardGroup.go @@ -6,6 +6,7 @@ import ( "context" "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/service/rds/types" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -97,6 +98,15 @@ type RebootDBShardGroupOutput struct { // The status of the DB shard group. Status *string + // A list of tags. + // + // For more information, see [Tagging Amazon RDS resources] in the Amazon RDS User Guide or [Tagging Amazon Aurora and Amazon RDS resources] in the Amazon + // Aurora User Guide. + // + // [Tagging Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html + // [Tagging Amazon Aurora and Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html + TagList []types.Tag + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata diff --git a/service/rds/deserializers.go b/service/rds/deserializers.go index cf34eea976c..b48e9e42e27 100644 --- a/service/rds/deserializers.go +++ b/service/rds/deserializers.go @@ -3247,6 +3247,9 @@ func awsAwsquery_deserializeOpErrorCreateDBShardGroup(response *smithyhttp.Respo case strings.EqualFold("MaxDBShardGroupLimitReached", errorCode): return awsAwsquery_deserializeErrorMaxDBShardGroupLimitReached(response, errorBody) + case strings.EqualFold("NetworkTypeNotSupported", errorCode): + return awsAwsquery_deserializeErrorNetworkTypeNotSupported(response, errorBody) + case strings.EqualFold("UnsupportedDBEngineVersion", errorCode): return awsAwsquery_deserializeErrorUnsupportedDBEngineVersionFault(response, errorBody) @@ -28369,6 +28372,19 @@ func awsAwsquery_deserializeDocumentDBCluster(v **types.DBCluster, decoder smith sv.ClusterCreateTime = ptr.Time(t) } + case strings.EqualFold("ClusterScalabilityType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.ClusterScalabilityType = types.ClusterScalabilityType(xtv) + } + case strings.EqualFold("CopyTagsToSnapshot", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -37935,6 +37951,12 @@ func awsAwsquery_deserializeDocumentDBShardGroup(v **types.DBShardGroup, decoder sv.Status = ptr.String(xtv) } + case strings.EqualFold("TagList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentTagList(&sv.TagList, nodeDecoder); err != nil { + return err + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -57338,6 +57360,12 @@ func awsAwsquery_deserializeOpDocumentCreateDBShardGroupOutput(v **CreateDBShard sv.Status = ptr.String(xtv) } + case strings.EqualFold("TagList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentTagList(&sv.TagList, nodeDecoder); err != nil { + return err + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -58866,6 +58894,12 @@ func awsAwsquery_deserializeOpDocumentDeleteDBShardGroupOutput(v **DeleteDBShard sv.Status = ptr.String(xtv) } + case strings.EqualFold("TagList", t.Name.Local): + nodeDecoder := 
smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentTagList(&sv.TagList, nodeDecoder); err != nil { + return err + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -63405,6 +63439,12 @@ func awsAwsquery_deserializeOpDocumentModifyDBShardGroupOutput(v **ModifyDBShard sv.Status = ptr.String(xtv) } + case strings.EqualFold("TagList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentTagList(&sv.TagList, nodeDecoder); err != nil { + return err + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() @@ -64265,6 +64305,12 @@ func awsAwsquery_deserializeOpDocumentRebootDBShardGroupOutput(v **RebootDBShard sv.Status = ptr.String(xtv) } + case strings.EqualFold("TagList", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsAwsquery_deserializeDocumentTagList(&sv.TagList, nodeDecoder); err != nil { + return err + } + default: // Do nothing and ignore the unexpected tag element err = decoder.Decoder.Skip() diff --git a/service/rds/serializers.go b/service/rds/serializers.go index 3807abc3dc6..3db265fa59a 100644 --- a/service/rds/serializers.go +++ b/service/rds/serializers.go @@ -12582,6 +12582,11 @@ func awsAwsquery_serializeOpDocumentCreateDBClusterInput(v *CreateDBClusterInput objectKey.String(*v.CharacterSetName) } + if len(v.ClusterScalabilityType) > 0 { + objectKey := object.Key("ClusterScalabilityType") + objectKey.String(string(v.ClusterScalabilityType)) + } + if v.CopyTagsToSnapshot != nil { objectKey := object.Key("CopyTagsToSnapshot") objectKey.Boolean(*v.CopyTagsToSnapshot) @@ -13692,6 +13697,13 @@ func awsAwsquery_serializeOpDocumentCreateDBShardGroupInput(v *CreateDBShardGrou objectKey.Boolean(*v.PubliclyAccessible) } + if v.Tags != nil { + objectKey := object.Key("Tags") + if err := awsAwsquery_serializeDocumentTagList(v.Tags, objectKey); err != nil { + return err + } + } + return nil } diff --git a/service/rds/types/enums.go b/service/rds/types/enums.go index 655512e87d4..03bc20594d2 100644 --- a/service/rds/types/enums.go +++ b/service/rds/types/enums.go @@ -164,6 +164,25 @@ func (ClientPasswordAuthType) Values() []ClientPasswordAuthType { } } +type ClusterScalabilityType string + +// Enum values for ClusterScalabilityType +const ( + ClusterScalabilityTypeStandard ClusterScalabilityType = "standard" + ClusterScalabilityTypeLimitless ClusterScalabilityType = "limitless" +) + +// Values returns all known values for ClusterScalabilityType. Note that this can +// be expanded in the future, and so it is only as up to date as the client. +// +// The ordering of this slice is not guaranteed to be stable across updates. +func (ClusterScalabilityType) Values() []ClusterScalabilityType { + return []ClusterScalabilityType{ + "standard", + "limitless", + } +} + type CustomEngineVersionStatus string // Enum values for CustomEngineVersionStatus diff --git a/service/rds/types/types.go b/service/rds/types/types.go index cad328c4c74..a25436da82f 100644 --- a/service/rds/types/types.go +++ b/service/rds/types/types.go @@ -628,6 +628,11 @@ type DBCluster struct { // The time when the DB cluster was created, in Universal Coordinated Time (UTC). ClusterCreateTime *time.Time + // The scalability mode of the Aurora DB cluster. When set to limitless , the + // cluster operates as an Aurora Limitless Database. 
When set to standard (the + // default), the cluster uses normal DB instance creation. + ClusterScalabilityType ClusterScalabilityType + // Indicates whether tags are copied from the DB cluster to snapshots of the DB // cluster. CopyTagsToSnapshot *bool @@ -2723,6 +2728,15 @@ type DBShardGroup struct { // The status of the DB shard group. Status *string + // A list of tags. + // + // For more information, see [Tagging Amazon RDS resources] in the Amazon RDS User Guide or [Tagging Amazon Aurora and Amazon RDS resources] in the Amazon + // Aurora User Guide. + // + // [Tagging Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html + // [Tagging Amazon Aurora and Amazon RDS resources]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html + TagList []Tag + noSmithyDocumentSerde }
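
The StopIngestionJob operation introduced above takes the same knowledge base, data source, and ingestion job identifiers as GetIngestionJob and returns the updated IngestionJob, which can now report the new STOPPING and STOPPED statuses. A minimal Go sketch of calling it follows; the client setup uses the SDK's default configuration loading, and every identifier is a placeholder rather than a value taken from this change.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/bedrockagent"
	"github.com/aws/aws-sdk-go-v2/service/bedrockagent/types"
)

func main() {
	ctx := context.Background()

	// Resolve credentials and region from the environment / shared config files.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := bedrockagent.NewFromConfig(cfg)

	// All three identifiers are placeholders; use the IDs of your own knowledge
	// base, data source, and in-progress ingestion job.
	out, err := client.StopIngestionJob(ctx, &bedrockagent.StopIngestionJobInput{
		KnowledgeBaseId: aws.String("EXAMPLEKBID"),
		DataSourceId:    aws.String("EXAMPLEDSID"),
		IngestionJobId:  aws.String("EXAMPLEIJID"),
	})
	if err != nil {
		log.Fatalf("stop ingestion job: %v", err)
	}

	job := out.IngestionJob
	fmt.Printf("ingestion job %s status: %s\n", aws.ToString(job.IngestionJobId), job.Status)

	// STOPPING and STOPPED are the IngestionJobStatus values added in this change.
	switch job.Status {
	case types.IngestionJobStatusStopping, types.IngestionJobStatusStopped:
		fmt.Println("stop accepted; send StartIngestionJob again later to ingest the remaining data")
	}
}

As the operation documentation notes, a stopped job keeps its original startedAt time, and the updatedAt time records when the job was stopped.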
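
For CodeArtifact, the patch adds an EndpointType field to GetRepositoryEndpointInput and serializes it as the endpointType query parameter, backing the new dual stack endpoint support. The sketch below requests a dualstack endpoint for an npm repository; the domain and repository names are placeholders, the PackageFormatNpm constant and RepositoryEndpoint output field come from the existing CodeArtifact client rather than this change, and leaving EndpointType unset keeps the query parameter out of the request, matching the previous request shape.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact"
	"github.com/aws/aws-sdk-go-v2/service/codeartifact/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := codeartifact.NewFromConfig(cfg)

	// "my-domain" and "my-repo" are placeholder names.
	out, err := client.GetRepositoryEndpoint(ctx, &codeartifact.GetRepositoryEndpointInput{
		Domain:       aws.String("my-domain"),
		Repository:   aws.String("my-repo"),
		Format:       types.PackageFormatNpm,
		EndpointType: types.EndpointTypeDualstack, // new: request the dual stack endpoint
	})
	if err != nil {
		log.Fatalf("get repository endpoint: %v", err)
	}
	fmt.Println("npm endpoint:", aws.ToString(out.RepositoryEndpoint))
}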
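
The RDS changes wire ClusterScalabilityType into CreateDBCluster and the DBCluster type, and add Tags/TagList to the DB shard group operations, in support of Aurora Limitless Database. A hedged sketch of both calls follows: every identifier and credential is a placeholder, the MasterUsername, MasterUserPassword, and MaxACU fields come from the existing RDS client rather than this patch, and a real Limitless deployment requires additional cluster settings (engine version, storage type, monitoring, Performance Insights) that are omitted here for brevity.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/rds"
	"github.com/aws/aws-sdk-go-v2/service/rds/types"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := rds.NewFromConfig(cfg)

	// Create an Aurora PostgreSQL cluster that operates as an Aurora Limitless
	// Database. Identifiers and credentials are placeholders, and a production
	// cluster needs more settings than this sketch shows.
	cluster, err := client.CreateDBCluster(ctx, &rds.CreateDBClusterInput{
		DBClusterIdentifier:    aws.String("example-limitless-cluster"),
		Engine:                 aws.String("aurora-postgresql"),
		MasterUsername:         aws.String("admin_user"),
		MasterUserPassword:     aws.String("replace-with-a-real-password"),
		ClusterScalabilityType: types.ClusterScalabilityTypeLimitless, // new in this release
	})
	if err != nil {
		log.Fatalf("create cluster: %v", err)
	}
	fmt.Println("cluster scalability type:", cluster.DBCluster.ClusterScalabilityType)

	// In a real workflow, wait for the cluster to become available before
	// creating its DB shard group. Tags on CreateDBShardGroup are also new
	// in this release.
	shardGroup, err := client.CreateDBShardGroup(ctx, &rds.CreateDBShardGroupInput{
		DBClusterIdentifier:    aws.String("example-limitless-cluster"),
		DBShardGroupIdentifier: aws.String("example-shard-group"),
		MaxACU:                 aws.Float64(768),
		Tags: []types.Tag{
			{Key: aws.String("project"), Value: aws.String("limitless-demo")},
		},
	})
	if err != nil {
		log.Fatalf("create shard group: %v", err)
	}
	fmt.Println("shard group status:", aws.ToString(shardGroup.Status))
}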