From 84ead9b93e964301058592e07c2dedaeb60c6a48 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Sun, 28 Apr 2024 17:24:46 +0100 Subject: [PATCH 01/15] Initial commit --- internal/service/bedrockagent/data_source.go | 816 ++++++++++++++++++ .../service/bedrockagent/data_source_test.go | 333 +++++++ .../r/bedrockagent_data_source.html.markdown | 69 ++ 3 files changed, 1218 insertions(+) create mode 100644 internal/service/bedrockagent/data_source.go create mode 100644 internal/service/bedrockagent/data_source_test.go create mode 100644 website/docs/r/bedrockagent_data_source.html.markdown diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go new file mode 100644 index 00000000000..9e62b4c1c08 --- /dev/null +++ b/internal/service/bedrockagent/data_source.go @@ -0,0 +1,816 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package bedrockagent + +import ( + "context" + "errors" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockagent" + awstypes "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + 
"github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for resource registration to the Provider. DO NOT EDIT. +// @FrameworkResource(name="Data Source") +func newResourceDataSource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceDataSource{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + r.SetDefaultUpdateTimeout(30 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +const ( + ResNameDataSource = "Data Source" +) + +type resourceDataSource struct { + framework.ResourceWithConfigure + framework.WithTimeouts +} + +func (r *resourceDataSource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_bedrockagent_data_source" +} + +func (r *resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "arn": framework.ARNAttributeComputedOnly(), + "data_deletion_policy": schema.StringAttribute{ + Optional: true, + }, + "description": schema.StringAttribute{ + Optional: true, + }, + "id": framework.IDAttribute(), + "knowledge_base_id": schema.StringAttribute{ + Required: true, + }, + "name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + Blocks: map[string]schema.Block{ + "server_side_encryption_configuration": 
schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[vectorKnowledgeBaseConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "kms_key_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + }, + }, + }, + }, + "data_source_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[knowledgeBaseConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "type": schema.StringAttribute{ + Required: true, + }, + }, + Blocks: map[string]schema.Block{ + "s3_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[vectorKnowledgeBaseConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "bucket_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + }, + "bucket_owner_account_id": schema.StringAttribute{ + Required: true, + }, + "inclusion_prefixes": schema.SetAttribute{ + CustomType: fwtypes.SetOfStringType, + ElementType: types.StringType, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "vector_ingestion_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[storageConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtLeast(1), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "chunking_configuration": schema.ListNestedBlock{ + 
CustomType: fwtypes.NewListNestedObjectTypeOf[pineconeConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "chunking_strategy": schema.StringAttribute{ + Required: true, + }, + }, + Blocks: map[string]schema.Block{ + "fixed_size_chunking_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[pineconeFieldMappingModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "max_tokens": schema.NumberAttribute{ + Optional: true, + }, + "overlap_percentage": schema.NumberAttribute{ + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "timeouts": timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func (r *resourceDataSource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // TIP: ==== RESOURCE CREATE ==== + // Generally, the Create function should do the following things. Make + // sure there is a good reason if you don't do one of these. + // + // 1. Get a client connection to the relevant service + // 2. Fetch the plan + // 3. Populate a create input structure + // 4. Call the AWS create/put function + // 5. Using the output from the create function, set the minimum arguments + // and attributes for the Read function to work, as well as any computed + // only attributes. + // 6. Use a waiter to wait for create to complete + // 7. Save the request plan to response state + + // TIP: -- 1. Get a client connection to the relevant service + conn := r.Meta().BedrockAgentClient(ctx) + + // TIP: -- 2. Fetch the plan + var plan resourceDataSourceData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + return + } + + // TIP: -- 3. 
Populate a create input structure + in := &bedrockagent.CreateDataSourceInput{ + // TIP: Mandatory or fields that will always be present can be set when + // you create the Input structure. (Replace these with real fields.) + DataSourceName: aws.String(plan.Name.ValueString()), + DataSourceType: aws.String(plan.Type.ValueString()), + } + + if !plan.Description.IsNull() { + // TIP: Optional fields should be set based on whether or not they are + // used. + in.Description = aws.String(plan.Description.ValueString()) + } + if !plan.ComplexArgument.IsNull() { + // TIP: Use an expander to assign a complex argument. The elements must be + // deserialized into the appropriate struct before being passed to the expander. + var tfList []complexArgumentData + resp.Diagnostics.Append(plan.ComplexArgument.ElementsAs(ctx, &tfList, false)...) + if resp.Diagnostics.HasError() { + return + } + + in.ComplexArgument = expandComplexArgument(tfList) + } + + // TIP: -- 4. Call the AWS create function + out, err := conn.CreateDataSource(ctx, in) + if err != nil { + // TIP: Since ID has not been set yet, you cannot use plan.ID.String() + // in error messages at this point. + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionCreating, ResNameDataSource, plan.Name.String(), err), + err.Error(), + ) + return + } + if out == nil || out.DataSource == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionCreating, ResNameDataSource, plan.Name.String(), nil), + errors.New("empty output").Error(), + ) + return + } + + // TIP: -- 5. Using the output from the create function, set the minimum attributes + plan.ARN = flex.StringToFramework(ctx, out.DataSource.Arn) + plan.ID = flex.StringToFramework(ctx, out.DataSource.DataSourceId) + + // TIP: -- 6. 
Use a waiter to wait for create to complete + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + _, err = waitDataSourceCreated(ctx, conn, plan.ID.ValueString(), createTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionWaitingForCreation, ResNameDataSource, plan.Name.String(), err), + err.Error(), + ) + return + } + + // TIP: -- 7. Save the request plan to response state + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} + +func (r *resourceDataSource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // TIP: ==== RESOURCE READ ==== + // Generally, the Read function should do the following things. Make + // sure there is a good reason if you don't do one of these. + // + // 1. Get a client connection to the relevant service + // 2. Fetch the state + // 3. Get the resource from AWS + // 4. Remove resource from state if it is not found + // 5. Set the arguments and attributes + // 6. Set the state + + // TIP: -- 1. Get a client connection to the relevant service + conn := r.Meta().BedrockAgentClient(ctx) + + // TIP: -- 2. Fetch the state + var state resourceDataSourceData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + // TIP: -- 3. Get the resource from AWS using an API Get, List, or Describe- + // type function, or, better yet, using a finder. + out, err := findDataSourceByID(ctx, conn, state.ID.ValueString()) + // TIP: -- 4. Remove resource from state if it is not found + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionSetting, ResNameDataSource, state.ID.String(), err), + err.Error(), + ) + return + } + + // TIP: -- 5. 
Set the arguments and attributes + // + // For simple data types (i.e., schema.StringAttribute, schema.BoolAttribute, + // schema.Int64Attribute, and schema.Float64Attribue), simply setting the + // appropriate data struct field is sufficient. The flex package implements + // helpers for converting between Go and Plugin-Framework types seamlessly. No + // error or nil checking is necessary. + // + // However, there are some situations where more handling is needed such as + // complex data types (e.g., schema.ListAttribute, schema.SetAttribute). In + // these cases the flatten function may have a diagnostics return value, which + // should be appended to resp.Diagnostics. + state.ARN = flex.StringToFramework(ctx, out.Arn) + state.ID = flex.StringToFramework(ctx, out.DataSourceId) + state.Name = flex.StringToFramework(ctx, out.DataSourceName) + state.Type = flex.StringToFramework(ctx, out.DataSourceType) + + // TIP: Setting a complex type. + complexArgument, d := flattenComplexArgument(ctx, out.ComplexArgument) + resp.Diagnostics.Append(d...) + state.ComplexArgument = complexArgument + + // TIP: -- 6. Set the state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceDataSource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // TIP: ==== RESOURCE UPDATE ==== + // Not all resources have Update functions. There are a few reasons: + // a. The AWS API does not support changing a resource + // b. All arguments have RequiresReplace() plan modifiers + // c. The AWS API uses a create call to modify an existing resource + // + // In the cases of a. and b., the resource will not have an update method + // defined. In the case of c., Update and Create can be refactored to call + // the same underlying function. + // + // The rest of the time, there should be an Update function and it should + // do the following things. Make sure there is a good reason if you don't + // do one of these. + // + // 1. 
Get a client connection to the relevant service + // 2. Fetch the plan and state + // 3. Populate a modify input structure and check for changes + // 4. Call the AWS modify/update function + // 5. Use a waiter to wait for update to complete + // 6. Save the request plan to response state + // TIP: -- 1. Get a client connection to the relevant service + conn := r.Meta().BedrockAgentClient(ctx) + + // TIP: -- 2. Fetch the plan + var plan, state resourceDataSourceData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + // TIP: -- 3. Populate a modify input structure and check for changes + if !plan.Name.Equal(state.Name) || + !plan.Description.Equal(state.Description) || + !plan.ComplexArgument.Equal(state.ComplexArgument) || + !plan.Type.Equal(state.Type) { + + in := &bedrockagent.UpdateDataSourceInput{ + // TIP: Mandatory or fields that will always be present can be set when + // you create the Input structure. (Replace these with real fields.) + DataSourceId: aws.String(plan.ID.ValueString()), + DataSourceName: aws.String(plan.Name.ValueString()), + DataSourceType: aws.String(plan.Type.ValueString()), + } + + if !plan.Description.IsNull() { + // TIP: Optional fields should be set based on whether or not they are + // used. + in.Description = aws.String(plan.Description.ValueString()) + } + if !plan.ComplexArgument.IsNull() { + // TIP: Use an expander to assign a complex argument. The elements must be + // deserialized into the appropriate struct before being passed to the expander. + var tfList []complexArgumentData + resp.Diagnostics.Append(plan.ComplexArgument.ElementsAs(ctx, &tfList, false)...) + if resp.Diagnostics.HasError() { + return + } + + in.ComplexArgument = expandComplexArgument(tfList) + } + + // TIP: -- 4. 
Call the AWS modify/update function + out, err := conn.UpdateDataSource(ctx, in) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionUpdating, ResNameDataSource, plan.ID.String(), err), + err.Error(), + ) + return + } + if out == nil || out.DataSource == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionUpdating, ResNameDataSource, plan.ID.String(), nil), + errors.New("empty output").Error(), + ) + return + } + + // TIP: Using the output from the update function, re-set any computed attributes + plan.ARN = flex.StringToFramework(ctx, out.DataSource.Arn) + plan.ID = flex.StringToFramework(ctx, out.DataSource.DataSourceId) + } + + // TIP: -- 5. Use a waiter to wait for update to complete + updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) + _, err := waitDataSourceUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionWaitingForUpdate, ResNameDataSource, plan.ID.String(), err), + err.Error(), + ) + return + } + + // TIP: -- 6. Save the request plan to response state + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *resourceDataSource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // TIP: ==== RESOURCE DELETE ==== + // Most resources have Delete functions. There are rare situations + // where you might not need a delete: + // a. The AWS API does not provide a way to delete the resource + // b. The point of your resource is to perform an action (e.g., reboot a + // server) and deleting serves no purpose. + // + // The Delete function should do the following things. Make sure there + // is a good reason if you don't do one of these. + // + // 1. Get a client connection to the relevant service + // 2. Fetch the state + // 3. Populate a delete input structure + // 4. 
Call the AWS delete function + // 5. Use a waiter to wait for delete to complete + // TIP: -- 1. Get a client connection to the relevant service + conn := r.Meta().BedrockAgentClient(ctx) + + // TIP: -- 2. Fetch the state + var state resourceDataSourceData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + + // TIP: -- 3. Populate a delete input structure + in := &bedrockagent.DeleteDataSourceInput{ + DataSourceId: aws.String(state.ID.ValueString()), + } + + // TIP: -- 4. Call the AWS delete function + _, err := conn.DeleteDataSource(ctx, in) + // TIP: On rare occassions, the API returns a not found error after deleting a + // resource. If that happens, we don't want it to show up as an error. + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionDeleting, ResNameDataSource, state.ID.String(), err), + err.Error(), + ) + return + } + + // TIP: -- 5. Use a waiter to wait for delete to complete + deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) + _, err = waitDataSourceDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionWaitingForDeletion, ResNameDataSource, state.ID.String(), err), + err.Error(), + ) + return + } +} + +// TIP: ==== TERRAFORM IMPORTING ==== +// If Read can get all the information it needs from the Identifier +// (i.e., path.Root("id")), you can use the PassthroughID importer. Otherwise, +// you'll need a custom import function. 
+// +// See more: +// https://developer.hashicorp.com/terraform/plugin/framework/resources/import +func (r *resourceDataSource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} + +// TIP: ==== STATUS CONSTANTS ==== +// Create constants for states and statuses if the service does not +// already have suitable constants. We prefer that you use the constants +// provided in the service if available (e.g., awstypes.StatusInProgress). +const ( + statusChangePending = "Pending" + statusDeleting = "Deleting" + statusNormal = "Normal" + statusUpdated = "Updated" +) + +// TIP: ==== WAITERS ==== +// Some resources of some services have waiters provided by the AWS API. +// Unless they do not work properly, use them rather than defining new ones +// here. +// +// Sometimes we define the wait, status, and find functions in separate +// files, wait.go, status.go, and find.go. Follow the pattern set out in the +// service and define these where it makes the most sense. +// +// If these functions are used in the _test.go file, they will need to be +// exported (i.e., capitalized). +// +// You will need to adjust the parameters and names to fit the service. +func waitDataSourceCreated(ctx context.Context, conn *bedrockagent.Client, id string, timeout time.Duration) (*awstypes.DataSource, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{}, + Target: []string{statusNormal}, + Refresh: statusDataSource(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*bedrockagent.DataSource); ok { + return out, err + } + + return nil, err +} + +// TIP: It is easier to determine whether a resource is updated for some +// resources than others. 
The best case is a status flag that tells you when +// the update has been fully realized. Other times, you can check to see if a +// key resource argument is updated to a new value or not. +func waitDataSourceUpdated(ctx context.Context, conn *bedrockagent.Client, id string, timeout time.Duration) (*awstypes.DataSource, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{statusChangePending}, + Target: []string{statusUpdated}, + Refresh: statusDataSource(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*bedrockagent.DataSource); ok { + return out, err + } + + return nil, err +} + +// TIP: A deleted waiter is almost like a backwards created waiter. There may +// be additional pending states, however. +func waitDataSourceDeleted(ctx context.Context, conn *bedrockagent.Client, id string, timeout time.Duration) (*awstypes.DataSource, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{statusDeleting, statusNormal}, + Target: []string{}, + Refresh: statusDataSource(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*bedrockagent.DataSource); ok { + return out, err + } + + return nil, err +} + +// TIP: ==== STATUS ==== +// The status function can return an actual status when that field is +// available from the API (e.g., out.Status). Otherwise, you can use custom +// statuses to communicate the states of the resource. +// +// Waiters consume the values returned by status functions. Design status so +// that it can be reused by a create, update, and delete waiter, if possible. 
+func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findDataSourceByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, aws.ToString(out.Status), nil + } +} + +// TIP: ==== FINDERS ==== +// The find function is not strictly necessary. You could do the API +// request from the status function. However, we have found that find often +// comes in handy in other places besides the status function. As a result, it +// is good practice to define it separately. +func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id string) (*awstypes.DataSource, error) { + in := &bedrockagent.GetDataSourceInput{ + Id: aws.String(id), + } + + out, err := conn.GetDataSource(ctx, in) + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.DataSource == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out.DataSource, nil +} + +// TIP: ==== FLEX ==== +// Flatteners and expanders ("flex" functions) help handle complex data +// types. Flatteners take an API data type and return the equivalent Plugin-Framework +// type. In other words, flatteners translate from AWS -> Terraform. +// +// On the other hand, expanders take a Terraform data structure and return +// something that you can send to the AWS API. In other words, expanders +// translate from Terraform -> AWS. 
+// +// See more: +// https://hashicorp.github.io/terraform-provider-aws/data-handling-and-conversion/ +func flattenComplexArgument(ctx context.Context, apiObject *awstypes.ComplexArgument) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: complexArgumentAttrTypes} + + if apiObject == nil { + return types.ListNull(elemType), diags + } + + obj := map[string]attr.Value{ + "nested_required": flex.StringValueToFramework(ctx, apiObject.NestedRequired), + "nested_optional": flex.StringValueToFramework(ctx, apiObject.NestedOptional), + } + objVal, d := types.ObjectValue(complexArgumentAttrTypes, obj) + diags.Append(d...) + + listVal, d := types.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) + + return listVal, diags +} + +// TIP: Often the AWS API will return a slice of structures in response to a +// request for information. Sometimes you will have set criteria (e.g., the ID) +// that means you'll get back a one-length slice. This plural function works +// brilliantly for that situation too. +func flattenComplexArguments(ctx context.Context, apiObjects []*awstypes.ComplexArgument) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: complexArgumentAttrTypes} + + if len(apiObjects) == 0 { + return types.ListNull(elemType), diags + } + + elems := []attr.Value{} + for _, apiObject := range apiObjects { + if apiObject == nil { + continue + } + + obj := map[string]attr.Value{ + "nested_required": flex.StringValueToFramework(ctx, apiObject.NestedRequired), + "nested_optional": flex.StringValueToFramework(ctx, apiObject.NestedOptional), + } + objVal, d := types.ObjectValue(complexArgumentAttrTypes, obj) + diags.Append(d...) + + elems = append(elems, objVal) + } + + listVal, d := types.ListValue(elemType, elems) + diags.Append(d...) 
+ + return listVal, diags +} + +// TIP: Remember, as mentioned above, expanders take a Terraform data structure +// and return something that you can send to the AWS API. In other words, +// expanders translate from Terraform -> AWS. +// +// See more: +// https://hashicorp.github.io/terraform-provider-aws/data-handling-and-conversion/ +func expandComplexArgument(tfList []complexArgumentData) *awstypes.ComplexArgument { + if len(tfList) == 0 { + return nil + } + + tfObj := tfList[0] + apiObject := &awstypes.ComplexArgument{ + NestedRequired: aws.String(tfObj.NestedRequired.ValueString()), + } + if !tfObj.NestedOptional.IsNull() { + apiObject.NestedOptional = aws.String(tfObj.NestedOptional.ValueString()) + } + + return apiObject +} + +// TIP: Even when you have a list with max length of 1, this plural function +// works brilliantly. However, if the AWS API takes a structure rather than a +// slice of structures, you will not need it. +func expandComplexArguments(tfList []complexArgumentData) []*bedrockagent.ComplexArgument { + // TIP: The AWS API can be picky about whether you send a nil or zero- + // length for an argument that should be cleared. For example, in some + // cases, if you send a nil value, the AWS API interprets that as "make no + // changes" when what you want to say is "remove everything." Sometimes + // using a zero-length list will cause an error. + // + // As a result, here are two options. Usually, option 1, nil, will work as + // expected, clearing the field. But, test going from something to nothing + // to make sure it works. If not, try the second option. + // TIP: Option 1: Returning nil for zero-length list + if len(tfList) == 0 { + return nil + } + var apiObject []*awstypes.ComplexArgument + // TIP: Option 2: Return zero-length list for zero-length list. If option 1 does + // not work, after testing going from something to nothing (if that is + // possible), uncomment out the next line and remove option 1. 
+ // + // apiObject := make([]*bedrockagent.ComplexArgument, 0) + + for _, tfObj := range tfList { + item := &bedrockagent.ComplexArgument{ + NestedRequired: aws.String(tfObj.NestedRequired.ValueString()), + } + if !tfObj.NestedOptional.IsNull() { + item.NestedOptional = aws.String(tfObj.NestedOptional.ValueString()) + } + + apiObject = append(apiObject, item) + } + + return apiObject +} + +type dataSourceResourceModel struct { + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + DataDeletionPolicy types.String `tfsdk:"data_deletion_policy"` + DataSourceConfiguration fwtypes.ListNestedObjectValueOf[dataSourceConfigurationModel] `tfsdk:"data_source_configuration"` + DataSourceID types.String `tfsdk:"id"` + Description types.String `tfsdk:"description"` + FailureReasons fwtypes.ListValueOf[types.String] `tfsdk:"failure_reasons"` + KnowledgeBaseID types.String `tfsdk:"knowledge_base_id"` + Name types.String `tfsdk:"name"` + ServerSideEncryptionConfiguration fwtypes.ListNestedObjectValueOf[serverSideEncryptionConfigurationModel] `tfsdk:"server_side_encryption_configuration"` + VectorIngestionConfiguration fwtypes.ListNestedObjectValueOf[vectorIngestionConfigurationModel] `tfsdk:"storage_configuration"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + UpdatedAt timetypes.RFC3339 `tfsdk:"updated_at"` +} + +type dataSourceConfigurationModel struct { + Type types.String `tfsdk:"type"` + S3Configuration fwtypes.ListNestedObjectValueOf[s3ConfigurationModel] `tfsdk:"s3_configuration"` +} + +type s3ConfigurationModel struct { + BucketARN fwtypes.ARN `tfsdk:"bucket_arn"` + BucketOwnerAccountId types.String `tfsdk:"bucket_owner_account_id"` + InclusionPrefixes fwtypes.SetValueOf[types.String] `tfsdk:"inclusion_prefixes"` +} + +type serverSideEncryptionConfigurationModel struct { + KmsKeyArn types.String `tfsdk:"kms_key_arn"` +} + +type vectorIngestionConfigurationModell struct { + ChunkingConfiguration 
fwtypes.ListNestedObjectValueOf[chunkingConfigurationModel] `tfsdk:"chunking_Configuration"` +} + +type chunkingConfigurationModel struct { + ChunkingStrategy types.String `tfsdk:"chunking_strategy"` + FixedSizeChunkingConfiguration fwtypes.ListNestedObjectValueOf[fixedSizeChunkingConfigurationModel] `tfsdk:"fixed_size_chunking_configuration"` +} + +type fixedSizeChunkingConfigurationModel struct { + MaxTokens types.Number `tfsdk:"max_tokens"` + OverlapPercentage types.Number `tfsdk:"overlap_percentage"` +} diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go new file mode 100644 index 00000000000..625bc56b811 --- /dev/null +++ b/internal/service/bedrockagent/data_source_test.go @@ -0,0 +1,333 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package bedrockagent_test + +// **PLEASE DELETE THIS AND ALL TIP COMMENTS BEFORE SUBMITTING A PR FOR REVIEW!** +// +// TIP: ==== INTRODUCTION ==== +// Thank you for trying the skaff tool! +// +// You have opted to include these helpful comments. They all include "TIP:" +// to help you find and remove them when you're done with them. +// +// While some aspects of this file are customized to your input, the +// scaffold tool does *not* look at the AWS API and ensure it has correct +// function, structure, and variable names. It makes guesses based on +// commonalities. You will need to make significant adjustments. +// +// In other words, as generated, this is a rough outline of the work you will +// need to do. If something doesn't make sense for your situation, get rid of +// it. + +import ( + // TIP: ==== IMPORTS ==== + // This is a common set of imports but not customized to your code since + // your code hasn't been written yet. Make sure you, your IDE, or + // goimports -w fixes these imports. 
+ // + // The provider linter wants your imports to be in two groups: first, + // standard library (i.e., "fmt" or "strings"), second, everything else. + // + // Also, AWS Go SDK v2 may handle nested structures differently than v1, + // using the services/bedrockagent/types package. If so, you'll + // need to import types and reference the nested types, e.g., as + // types.. + "context" + "errors" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/bedrockagent" + "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/names" + + // TIP: You will often need to import the package that this test file lives + // in. Since it is in the "test" context, it must import the package to use + // any normal context constants, variables, or functions. + tfbedrockagent "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagent" +) + +// TIP: File Structure. The basic outline for all test files should be as +// follows. Improve this resource's maintainability by following this +// outline. +// +// 1. Package declaration (add "_test" since this is a test file) +// 2. Imports +// 3. Unit tests +// 4. Basic test +// 5. Disappears test +// 6. All the other tests +// 7. Helper functions (exists, destroy, check, etc.) +// 8. Functions that return Terraform configurations + +// TIP: ==== UNIT TESTS ==== +// This is an example of a unit test. 
Its name is not prefixed with +// "TestAcc" like an acceptance test. +// +// Unlike acceptance tests, unit tests do not access AWS and are focused on a +// function (or method). Because of this, they are quick and cheap to run. +// +// In designing a resource's implementation, isolate complex bits from AWS bits +// so that they can be tested through a unit test. We encourage more unit tests +// in the provider. +// +// Cut and dry functions using well-used patterns, like typical flatteners and +// expanders, don't need unit testing. However, if they are complex or +// intricate, they should be unit tested. +func TestDataSourceExampleUnitTest(t *testing.T) { + t.Parallel() + + testCases := []struct { + TestName string + Input string + Expected string + Error bool + }{ + { + TestName: "empty", + Input: "", + Expected: "", + Error: true, + }, + { + TestName: "descriptive name", + Input: "some input", + Expected: "some output", + Error: false, + }, + { + TestName: "another descriptive name", + Input: "more input", + Expected: "more output", + Error: false, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.TestName, func(t *testing.T) { + t.Parallel() + got, err := tfbedrockagent.FunctionFromResource(testCase.Input) + + if err != nil && !testCase.Error { + t.Errorf("got error (%s), expected no error", err) + } + + if err == nil && testCase.Error { + t.Errorf("got (%s) and no error, expected error", got) + } + + if got != testCase.Expected { + t.Errorf("got %s, expected %s", got, testCase.Expected) + } + }) + } +} + +// TIP: ==== ACCEPTANCE TESTS ==== +// This is an example of a basic acceptance test. This should test as much of +// standard functionality of the resource as possible, and test importing, if +// applicable. We prefix its name with "TestAcc", the service, and the +// resource name. +// +// Acceptance test access AWS and cost money to run. 
+func TestAccBedrockAgentDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + // TIP: This is a long-running test guard for tests that run longer than + // 300s (5 min) generally. + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datasource bedrockagent.DescribeDataSourceResponse + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagent_data_source.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockAgentEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataSourceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataSourceExists(ctx, resourceName, &datasource), + resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), + resource.TestCheckResourceAttrSet(resourceName, "maintenance_window_start_time.0.day_of_week"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user.*", map[string]string{ + "console_access": "false", + "groups.#": "0", + "username": "Test", + "password": "TestTest1234", + }), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "bedrockagent", regexache.MustCompile(`datasource:+.`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + }, + }, + }) +} + +func TestAccBedrockAgentDataSource_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var datasource bedrockagent.DescribeDataSourceResponse + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := 
"aws_bedrockagent_data_source.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.BedrockAgentEndpointID) + testAccPreCheck(t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataSourceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceConfig_basic(rName, testAccDataSourceVersionNewer), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataSourceExists(ctx, resourceName, &datasource), + // TIP: The Plugin-Framework disappears helper is similar to the Plugin-SDK version, + // but expects a new resource factory function as the third argument. To expose this + // private function to the testing package, you may need to add a line like the following + // to exports_test.go: + // + // var ResourceDataSource = newResourceDataSource + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfbedrockagent.ResourceDataSource, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckDataSourceDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_bedrockagent_data_source" { + continue + } + + input := &bedrockagent.DescribeDataSourceInput{ + DataSourceId: aws.String(rs.Primary.ID), + } + _, err := conn.DescribeDataSource(ctx, &bedrockagent.DescribeDataSourceInput{ + DataSourceId: aws.String(rs.Primary.ID), + }) + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil + } + if err != nil { + return create.Error(names.BedrockAgent, create.ErrActionCheckingDestroyed, tfbedrockagent.ResNameDataSource, rs.Primary.ID, err) + } + + return create.Error(names.BedrockAgent, create.ErrActionCheckingDestroyed, 
tfbedrockagent.ResNameDataSource, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckDataSourceExists(ctx context.Context, name string, datasource *bedrockagent.DescribeDataSourceResponse) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.BedrockAgent, create.ErrActionCheckingExistence, tfbedrockagent.ResNameDataSource, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.BedrockAgent, create.ErrActionCheckingExistence, tfbedrockagent.ResNameDataSource, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentClient(ctx) + resp, err := conn.DescribeDataSource(ctx, &bedrockagent.DescribeDataSourceInput{ + DataSourceId: aws.String(rs.Primary.ID), + }) + + if err != nil { + return create.Error(names.BedrockAgent, create.ErrActionCheckingExistence, tfbedrockagent.ResNameDataSource, rs.Primary.ID, err) + } + + *datasource = *resp + + return nil + } +} + +func testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentClient(ctx) + + input := &bedrockagent.ListDataSourcesInput{} + _, err := conn.ListDataSources(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + +func testAccCheckDataSourceNotRecreated(before, after *bedrockagent.DescribeDataSourceResponse) resource.TestCheckFunc { + return func(s *terraform.State) error { + if before, after := aws.ToString(before.DataSourceId), aws.ToString(after.DataSourceId); before != after { + return create.Error(names.BedrockAgent, create.ErrActionCheckingNotRecreated, tfbedrockagent.ResNameDataSource, before, errors.New("recreated")) + } + + return nil + } +} + +func testAccDataSourceConfig_basic(rName, 
version string) string { + return fmt.Sprintf(` +resource "aws_security_group" "test" { + name = %[1]q +} + +resource "aws_bedrockagent_data_source" "test" { + data_source_name = %[1]q + engine_type = "ActiveBedrockAgent" + engine_version = %[2]q + host_instance_type = "bedrockagent.t2.micro" + security_groups = [aws_security_group.test.id] + authentication_strategy = "simple" + storage_type = "efs" + + logs { + general = true + } + + user { + username = "Test" + password = "TestTest1234" + } +} +`, rName, version) +} diff --git a/website/docs/r/bedrockagent_data_source.html.markdown b/website/docs/r/bedrockagent_data_source.html.markdown new file mode 100644 index 00000000000..a27cea72812 --- /dev/null +++ b/website/docs/r/bedrockagent_data_source.html.markdown @@ -0,0 +1,69 @@ +--- +subcategory: "Agents for Amazon Bedrock" +layout: "aws" +page_title: "AWS: aws_bedrockagent_data_source" +description: |- + Terraform resource for managing an AWS Agents for Amazon Bedrock Data Source. +--- +` +# Resource: aws_bedrockagent_data_source + +Terraform resource for managing an AWS Agents for Amazon Bedrock Data Source. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_bedrockagent_data_source" "example" { +} +``` + +## Argument Reference + +The following arguments are required: + +* `example_arg` - (Required) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +The following arguments are optional: + +* `optional_arg` - (Optional) Concise argument description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Data Source. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `example_attribute` - Concise description. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `60m`) +* `update` - (Default `180m`) +* `delete` - (Default `90m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Agents for Amazon Bedrock Data Source using the `example_id_arg`. For example: + +```terraform +import { + to = aws_bedrockagent_data_source.example + id = "data_source-id-12345678" +} +``` + +Using `terraform import`, import Agents for Amazon Bedrock Data Source using the `example_id_arg`. 
For example: + +```console +% terraform import aws_bedrockagent_data_source.example data_source-id-12345678 +``` From c2730a3462d02073c91d23f74a4823c84b2f0574 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Sun, 28 Apr 2024 20:41:40 +0100 Subject: [PATCH 02/15] Initial code --- internal/service/bedrockagent/data_source.go | 603 ++++-------------- .../service/bedrockagent/data_source_test.go | 271 ++------ internal/service/bedrockagent/exports_test.go | 2 + 3 files changed, 182 insertions(+), 694 deletions(-) diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index 9e62b4c1c08..6ff5be8f221 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -5,16 +5,15 @@ package bedrockagent import ( "context" - "errors" + "fmt" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockagent" awstypes "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" - "github.com/hashicorp/terraform-plugin-framework/attr" - "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -22,21 +21,21 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" + 
"github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/names" ) // Function annotations are used for resource registration to the Provider. DO NOT EDIT. // @FrameworkResource(name="Data Source") -func newResourceDataSource(_ context.Context) (resource.ResourceWithConfigure, error) { - r := &resourceDataSource{} +func newDataSourceResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &dataSourceResource{} r.SetDefaultCreateTimeout(30 * time.Minute) r.SetDefaultUpdateTimeout(30 * time.Minute) @@ -49,19 +48,18 @@ const ( ResNameDataSource = "Data Source" ) -type resourceDataSource struct { +type dataSourceResource struct { framework.ResourceWithConfigure framework.WithTimeouts } -func (r *resourceDataSource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { +func (r *dataSourceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { resp.TypeName = "aws_bedrockagent_data_source" } -func (r *resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { +func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - "arn": framework.ARNAttributeComputedOnly(), "data_deletion_policy": schema.StringAttribute{ Optional: true, }, @@ -81,7 +79,7 @@ func (r 
*resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequ }, Blocks: map[string]schema.Block{ "server_side_encryption_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[vectorKnowledgeBaseConfigurationModel](ctx), + CustomType: fwtypes.NewListNestedObjectTypeOf[serverSideEncryptionConfigurationModel](ctx), Validators: []validator.List{ listvalidator.IsRequired(), listvalidator.SizeAtLeast(1), @@ -97,7 +95,7 @@ func (r *resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequ }, }, "data_source_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[knowledgeBaseConfigurationModel](ctx), + CustomType: fwtypes.NewListNestedObjectTypeOf[dataSourceConfigurationModel](ctx), Validators: []validator.List{ listvalidator.IsRequired(), listvalidator.SizeAtLeast(1), @@ -111,7 +109,7 @@ func (r *resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequ }, Blocks: map[string]schema.Block{ "s3_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[vectorKnowledgeBaseConfigurationModel](ctx), + CustomType: fwtypes.NewListNestedObjectTypeOf[s3ConfigurationModel](ctx), Validators: []validator.List{ listvalidator.IsRequired(), listvalidator.SizeAtLeast(1), @@ -138,7 +136,7 @@ func (r *resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequ }, }, "vector_ingestion_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[storageConfigurationModel](ctx), + CustomType: fwtypes.NewListNestedObjectTypeOf[vectorIngestionConfigurationModel](ctx), Validators: []validator.List{ listvalidator.IsRequired(), listvalidator.SizeAtLeast(1), @@ -147,7 +145,7 @@ func (r *resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequ NestedObject: schema.NestedBlockObject{ Blocks: map[string]schema.Block{ "chunking_configuration": schema.ListNestedBlock{ - CustomType: 
fwtypes.NewListNestedObjectTypeOf[pineconeConfigurationModel](ctx), + CustomType: fwtypes.NewListNestedObjectTypeOf[chunkingConfigurationModel](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -159,7 +157,7 @@ func (r *resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequ }, Blocks: map[string]schema.Block{ "fixed_size_chunking_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[pineconeFieldMappingModel](ctx), + CustomType: fwtypes.NewListNestedObjectTypeOf[fixedSizeChunkingConfigurationModel](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -189,419 +187,230 @@ func (r *resourceDataSource) Schema(ctx context.Context, req resource.SchemaRequ } } -func (r *resourceDataSource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - // TIP: ==== RESOURCE CREATE ==== - // Generally, the Create function should do the following things. Make - // sure there is a good reason if you don't do one of these. - // - // 1. Get a client connection to the relevant service - // 2. Fetch the plan - // 3. Populate a create input structure - // 4. Call the AWS create/put function - // 5. Using the output from the create function, set the minimum arguments - // and attributes for the Read function to work, as well as any computed - // only attributes. - // 6. Use a waiter to wait for create to complete - // 7. Save the request plan to response state - - // TIP: -- 1. Get a client connection to the relevant service - conn := r.Meta().BedrockAgentClient(ctx) - - // TIP: -- 2. Fetch the plan - var plan resourceDataSourceData - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *dataSourceResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data dataSourceResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - // TIP: -- 3. Populate a create input structure - in := &bedrockagent.CreateDataSourceInput{ - // TIP: Mandatory or fields that will always be present can be set when - // you create the Input structure. (Replace these with real fields.) - DataSourceName: aws.String(plan.Name.ValueString()), - DataSourceType: aws.String(plan.Type.ValueString()), - } + conn := r.Meta().BedrockAgentClient(ctx) - if !plan.Description.IsNull() { - // TIP: Optional fields should be set based on whether or not they are - // used. - in.Description = aws.String(plan.Description.ValueString()) + input := &bedrockagent.CreateDataSourceInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + if response.Diagnostics.HasError() { + return } - if !plan.ComplexArgument.IsNull() { - // TIP: Use an expander to assign a complex argument. The elements must be - // deserialized into the appropriate struct before being passed to the expander. - var tfList []complexArgumentData - resp.Diagnostics.Append(plan.ComplexArgument.ElementsAs(ctx, &tfList, false)...) - if resp.Diagnostics.HasError() { - return - } + input.ClientToken = aws.String(id.UniqueId()) - in.ComplexArgument = expandComplexArgument(tfList) - } + outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { + return conn.CreateDataSource(ctx, input) + }, errCodeValidationException, "cannot assume role") - // TIP: -- 4. Call the AWS create function - out, err := conn.CreateDataSource(ctx, in) if err != nil { - // TIP: Since ID has not been set yet, you cannot use plan.ID.String() - // in error messages at this point. 
- resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionCreating, ResNameDataSource, plan.Name.String(), err), - err.Error(), - ) - return - } - if out == nil || out.DataSource == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionCreating, ResNameDataSource, plan.Name.String(), nil), - errors.New("empty output").Error(), - ) + response.Diagnostics.AddError("creating Bedrock Agent Data Source", err.Error()) + return } - // TIP: -- 5. Using the output from the create function, set the minimum attributes - plan.ARN = flex.StringToFramework(ctx, out.DataSource.Arn) - plan.ID = flex.StringToFramework(ctx, out.DataSource.DataSourceId) + ds := outputRaw.(*bedrockagent.CreateDataSourceOutput).DataSource + data.DataSourceID = fwflex.StringToFramework(ctx, ds.DataSourceId) + data.KnowledgeBaseID = fwflex.StringToFramework(ctx, ds.KnowledgeBaseId) + + ds, err = waitDataSourceCreated(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) - // TIP: -- 6. Use a waiter to wait for create to complete - createTimeout := r.CreateTimeout(ctx, plan.Timeouts) - _, err = waitDataSourceCreated(ctx, conn, plan.ID.ValueString(), createTimeout) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionWaitingForCreation, ResNameDataSource, plan.Name.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) create", data.DataSourceID.ValueString()), err.Error()) + return } - // TIP: -- 7. Save the request plan to response state - resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) -} + // Set values for unknowns after creation is complete. 
+ data.CreatedAt = fwflex.TimeToFramework(ctx, ds.CreatedAt) + data.FailureReasons = fwflex.FlattenFrameworkStringValueListOfString(ctx, ds.FailureReasons) + data.UpdatedAt = fwflex.TimeToFramework(ctx, ds.UpdatedAt) -func (r *resourceDataSource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - // TIP: ==== RESOURCE READ ==== - // Generally, the Read function should do the following things. Make - // sure there is a good reason if you don't do one of these. - // - // 1. Get a client connection to the relevant service - // 2. Fetch the state - // 3. Get the resource from AWS - // 4. Remove resource from state if it is not found - // 5. Set the arguments and attributes - // 6. Set the state - - // TIP: -- 1. Get a client connection to the relevant service - conn := r.Meta().BedrockAgentClient(ctx) + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} - // TIP: -- 2. Fetch the state - var state resourceDataSourceData - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *dataSourceResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data dataSourceResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - // TIP: -- 3. Get the resource from AWS using an API Get, List, or Describe- - // type function, or, better yet, using a finder. - out, err := findDataSourceByID(ctx, conn, state.ID.ValueString()) - // TIP: -- 4. 
Remove resource from state if it is not found + conn := r.Meta().BedrockAgentClient(ctx) + + ds, err := findDataSourceByID(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString()) + if tfresource.NotFound(err) { - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionSetting, ResNameDataSource, state.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading Bedrock Agent Data Source (%s)", data.DataSourceID.ValueString()), err.Error()) + return } - // TIP: -- 5. Set the arguments and attributes - // - // For simple data types (i.e., schema.StringAttribute, schema.BoolAttribute, - // schema.Int64Attribute, and schema.Float64Attribue), simply setting the - // appropriate data struct field is sufficient. The flex package implements - // helpers for converting between Go and Plugin-Framework types seamlessly. No - // error or nil checking is necessary. - // - // However, there are some situations where more handling is needed such as - // complex data types (e.g., schema.ListAttribute, schema.SetAttribute). In - // these cases the flatten function may have a diagnostics return value, which - // should be appended to resp.Diagnostics. - state.ARN = flex.StringToFramework(ctx, out.Arn) - state.ID = flex.StringToFramework(ctx, out.DataSourceId) - state.Name = flex.StringToFramework(ctx, out.DataSourceName) - state.Type = flex.StringToFramework(ctx, out.DataSourceType) - - // TIP: Setting a complex type. - complexArgument, d := flattenComplexArgument(ctx, out.ComplexArgument) - resp.Diagnostics.Append(d...) - state.ComplexArgument = complexArgument - - // TIP: -- 6. Set the state - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
-} + response.Diagnostics.Append(fwflex.Flatten(ctx, ds, &data)...) + if response.Diagnostics.HasError() { + return + } -func (r *resourceDataSource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - // TIP: ==== RESOURCE UPDATE ==== - // Not all resources have Update functions. There are a few reasons: - // a. The AWS API does not support changing a resource - // b. All arguments have RequiresReplace() plan modifiers - // c. The AWS API uses a create call to modify an existing resource - // - // In the cases of a. and b., the resource will not have an update method - // defined. In the case of c., Update and Create can be refactored to call - // the same underlying function. - // - // The rest of the time, there should be an Update function and it should - // do the following things. Make sure there is a good reason if you don't - // do one of these. - // - // 1. Get a client connection to the relevant service - // 2. Fetch the plan and state - // 3. Populate a modify input structure and check for changes - // 4. Call the AWS modify/update function - // 5. Use a waiter to wait for update to complete - // 6. Save the request plan to response state - // TIP: -- 1. Get a client connection to the relevant service - conn := r.Meta().BedrockAgentClient(ctx) + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} - // TIP: -- 2. Fetch the plan - var plan, state resourceDataSourceData - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *dataSourceResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new dataSourceResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) 
+ if response.Diagnostics.HasError() { return } - // TIP: -- 3. Populate a modify input structure and check for changes - if !plan.Name.Equal(state.Name) || - !plan.Description.Equal(state.Description) || - !plan.ComplexArgument.Equal(state.ComplexArgument) || - !plan.Type.Equal(state.Type) { - - in := &bedrockagent.UpdateDataSourceInput{ - // TIP: Mandatory or fields that will always be present can be set when - // you create the Input structure. (Replace these with real fields.) - DataSourceId: aws.String(plan.ID.ValueString()), - DataSourceName: aws.String(plan.Name.ValueString()), - DataSourceType: aws.String(plan.Type.ValueString()), - } + conn := r.Meta().BedrockAgentClient(ctx) - if !plan.Description.IsNull() { - // TIP: Optional fields should be set based on whether or not they are - // used. - in.Description = aws.String(plan.Description.ValueString()) + if !new.Description.Equal(old.Description) || + !new.DataDeletionPolicy.Equal(old.DataDeletionPolicy) || + !new.Name.Equal(old.Name) || + !new.DataSourceConfiguration.Equal(old.DataSourceConfiguration) || + !new.DataDeletionPolicy.Equal(old.DataDeletionPolicy) || + !new.ServerSideEncryptionConfiguration.Equal(old.ServerSideEncryptionConfiguration) || + !new.VectorIngestionConfiguration.Equal(old.VectorIngestionConfiguration) { + input := &bedrockagent.UpdateDataSourceInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...) + if response.Diagnostics.HasError() { + return } - if !plan.ComplexArgument.IsNull() { - // TIP: Use an expander to assign a complex argument. The elements must be - // deserialized into the appropriate struct before being passed to the expander. - var tfList []complexArgumentData - resp.Diagnostics.Append(plan.ComplexArgument.ElementsAs(ctx, &tfList, false)...) 
- if resp.Diagnostics.HasError() { - return - } - in.ComplexArgument = expandComplexArgument(tfList) - } + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { + return conn.UpdateDataSource(ctx, input) + }, errCodeValidationException, "cannot assume role") - // TIP: -- 4. Call the AWS modify/update function - out, err := conn.UpdateDataSource(ctx, in) if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionUpdating, ResNameDataSource, plan.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("updating Bedrock Agent Data Source (%s)", new.DataSourceID.ValueString()), err.Error()) + return } - if out == nil || out.DataSource == nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionUpdating, ResNameDataSource, plan.ID.String(), nil), - errors.New("empty output").Error(), - ) + + ds, err := waitDataSourceUpdated(ctx, conn, new.DataSourceID.ValueString(), new.KnowledgeBaseID.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) update", new.DataSourceID.ValueString()), err.Error()) + return } - // TIP: Using the output from the update function, re-set any computed attributes - plan.ARN = flex.StringToFramework(ctx, out.DataSource.Arn) - plan.ID = flex.StringToFramework(ctx, out.DataSource.DataSourceId) + new.FailureReasons = fwflex.FlattenFrameworkStringValueListOfString(ctx, ds.FailureReasons) + new.UpdatedAt = fwflex.TimeToFramework(ctx, ds.UpdatedAt) + } else { + new.FailureReasons = old.FailureReasons + new.UpdatedAt = old.UpdatedAt } - // TIP: -- 5. 
Use a waiter to wait for update to complete - updateTimeout := r.UpdateTimeout(ctx, plan.Timeouts) - _, err := waitDataSourceUpdated(ctx, conn, plan.ID.ValueString(), updateTimeout) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionWaitingForUpdate, ResNameDataSource, plan.ID.String(), err), - err.Error(), - ) + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *dataSourceResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data dataSourceResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - // TIP: -- 6. Save the request plan to response state - resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) -} - -func (r *resourceDataSource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - // TIP: ==== RESOURCE DELETE ==== - // Most resources have Delete functions. There are rare situations - // where you might not need a delete: - // a. The AWS API does not provide a way to delete the resource - // b. The point of your resource is to perform an action (e.g., reboot a - // server) and deleting serves no purpose. - // - // The Delete function should do the following things. Make sure there - // is a good reason if you don't do one of these. - // - // 1. Get a client connection to the relevant service - // 2. Fetch the state - // 3. Populate a delete input structure - // 4. Call the AWS delete function - // 5. Use a waiter to wait for delete to complete - // TIP: -- 1. Get a client connection to the relevant service conn := r.Meta().BedrockAgentClient(ctx) - // TIP: -- 2. Fetch the state - var state resourceDataSourceData - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
- if resp.Diagnostics.HasError() { - return - } + _, err := conn.DeleteDataSource(ctx, &bedrockagent.DeleteDataSourceInput{ + KnowledgeBaseId: aws.String(data.KnowledgeBaseID.ValueString()), + DataSourceId: aws.String(data.DataSourceID.ValueString()), + }) - // TIP: -- 3. Populate a delete input structure - in := &bedrockagent.DeleteDataSourceInput{ - DataSourceId: aws.String(state.ID.ValueString()), + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return } - // TIP: -- 4. Call the AWS delete function - _, err := conn.DeleteDataSource(ctx, in) - // TIP: On rare occassions, the API returns a not found error after deleting a - // resource. If that happens, we don't want it to show up as an error. if err != nil { - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return - } - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionDeleting, ResNameDataSource, state.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("deleting Bedrock Agent Data Source (%s)", data.DataSourceID.ValueString()), err.Error()) + return } - // TIP: -- 5. 
Use a waiter to wait for delete to complete - deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts) - _, err = waitDataSourceDeleted(ctx, conn, state.ID.ValueString(), deleteTimeout) + _, err = waitDataSourceDeleted(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.BedrockAgent, create.ErrActionWaitingForDeletion, ResNameDataSource, state.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Knowledge Base (%s) delete", data.KnowledgeBaseID.ValueString()), err.Error()) + return } } -// TIP: ==== TERRAFORM IMPORTING ==== -// If Read can get all the information it needs from the Identifier -// (i.e., path.Root("id")), you can use the PassthroughID importer. Otherwise, -// you'll need a custom import function. -// -// See more: -// https://developer.hashicorp.com/terraform/plugin/framework/resources/import -func (r *resourceDataSource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { +func (r *dataSourceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } -// TIP: ==== STATUS CONSTANTS ==== -// Create constants for states and statuses if the service does not -// already have suitable constants. We prefer that you use the constants -// provided in the service if available (e.g., awstypes.StatusInProgress). -const ( - statusChangePending = "Pending" - statusDeleting = "Deleting" - statusNormal = "Normal" - statusUpdated = "Updated" -) - -// TIP: ==== WAITERS ==== -// Some resources of some services have waiters provided by the AWS API. -// Unless they do not work properly, use them rather than defining new ones -// here. 
-// -// Sometimes we define the wait, status, and find functions in separate -// files, wait.go, status.go, and find.go. Follow the pattern set out in the -// service and define these where it makes the most sense. -// -// If these functions are used in the _test.go file, they will need to be -// exported (i.e., capitalized). -// -// You will need to adjust the parameters and names to fit the service. -func waitDataSourceCreated(ctx context.Context, conn *bedrockagent.Client, id string, timeout time.Duration) (*awstypes.DataSource, error) { +func waitDataSourceCreated(ctx context.Context, conn *bedrockagent.Client, id, kbID string, timeout time.Duration) (*awstypes.DataSource, error) { stateConf := &retry.StateChangeConf{ Pending: []string{}, - Target: []string{statusNormal}, - Refresh: statusDataSource(ctx, conn, id), + Target: enum.Slice(awstypes.DataSourceStatusAvailable), + Refresh: statusDataSource(ctx, conn, id, kbID), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*bedrockagent.DataSource); ok { + if out, ok := outputRaw.(*awstypes.DataSource); ok { return out, err } return nil, err } -// TIP: It is easier to determine whether a resource is updated for some -// resources than others. The best case is a status flag that tells you when -// the update has been fully realized. Other times, you can check to see if a -// key resource argument is updated to a new value or not. 
-func waitDataSourceUpdated(ctx context.Context, conn *bedrockagent.Client, id string, timeout time.Duration) (*awstypes.DataSource, error) { +func waitDataSourceUpdated(ctx context.Context, conn *bedrockagent.Client, id, kbID string, timeout time.Duration) (*awstypes.DataSource, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{statusChangePending}, - Target: []string{statusUpdated}, - Refresh: statusDataSource(ctx, conn, id), + Pending: enum.Slice(awstypes.DataSourceStatusAvailable), + Target: enum.Slice(awstypes.DataSourceStatusAvailable), + Refresh: statusDataSource(ctx, conn, id, kbID), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*bedrockagent.DataSource); ok { + if out, ok := outputRaw.(*awstypes.DataSource); ok { return out, err } return nil, err } -// TIP: A deleted waiter is almost like a backwards created waiter. There may -// be additional pending states, however. -func waitDataSourceDeleted(ctx context.Context, conn *bedrockagent.Client, id string, timeout time.Duration) (*awstypes.DataSource, error) { +func waitDataSourceDeleted(ctx context.Context, conn *bedrockagent.Client, id, kbID string, timeout time.Duration) (*awstypes.DataSource, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{statusDeleting, statusNormal}, + Pending: enum.Slice(awstypes.DataSourceStatusDeleting), Target: []string{}, - Refresh: statusDataSource(ctx, conn, id), + Refresh: statusDataSource(ctx, conn, id, kbID), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*bedrockagent.DataSource); ok { + if out, ok := outputRaw.(*awstypes.DataSource); ok { return out, err } return nil, err } -// TIP: ==== STATUS ==== -// The status function can return an actual status when that field is -// available from the API (e.g., out.Status). 
Otherwise, you can use custom -// statuses to communicate the states of the resource. -// -// Waiters consume the values returned by status functions. Design status so -// that it can be reused by a create, update, and delete waiter, if possible. -func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id string) retry.StateRefreshFunc { +func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id, kbID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findDataSourceByID(ctx, conn, id) + output, err := findDataSourceByID(ctx, conn, id, kbID) if tfresource.NotFound(err) { return nil, "", nil } @@ -610,18 +419,14 @@ func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id string) return nil, "", err } - return out, aws.ToString(out.Status), nil + return output, string(output.Status), nil } } -// TIP: ==== FINDERS ==== -// The find function is not strictly necessary. You could do the API -// request from the status function. However, we have found that find often -// comes in handy in other places besides the status function. As a result, it -// is good practice to define it separately. -func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id string) (*awstypes.DataSource, error) { +func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id, kbID string) (*awstypes.DataSource, error) { in := &bedrockagent.GetDataSourceInput{ - Id: aws.String(id), + DataSourceId: aws.String(id), + KnowledgeBaseId: aws.String(kbID), } out, err := conn.GetDataSource(ctx, in) @@ -643,132 +448,6 @@ func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id strin return out.DataSource, nil } -// TIP: ==== FLEX ==== -// Flatteners and expanders ("flex" functions) help handle complex data -// types. Flatteners take an API data type and return the equivalent Plugin-Framework -// type. In other words, flatteners translate from AWS -> Terraform. 
-// -// On the other hand, expanders take a Terraform data structure and return -// something that you can send to the AWS API. In other words, expanders -// translate from Terraform -> AWS. -// -// See more: -// https://hashicorp.github.io/terraform-provider-aws/data-handling-and-conversion/ -func flattenComplexArgument(ctx context.Context, apiObject *awstypes.ComplexArgument) (types.List, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: complexArgumentAttrTypes} - - if apiObject == nil { - return types.ListNull(elemType), diags - } - - obj := map[string]attr.Value{ - "nested_required": flex.StringValueToFramework(ctx, apiObject.NestedRequired), - "nested_optional": flex.StringValueToFramework(ctx, apiObject.NestedOptional), - } - objVal, d := types.ObjectValue(complexArgumentAttrTypes, obj) - diags.Append(d...) - - listVal, d := types.ListValue(elemType, []attr.Value{objVal}) - diags.Append(d...) - - return listVal, diags -} - -// TIP: Often the AWS API will return a slice of structures in response to a -// request for information. Sometimes you will have set criteria (e.g., the ID) -// that means you'll get back a one-length slice. This plural function works -// brilliantly for that situation too. -func flattenComplexArguments(ctx context.Context, apiObjects []*awstypes.ComplexArgument) (types.List, diag.Diagnostics) { - var diags diag.Diagnostics - elemType := types.ObjectType{AttrTypes: complexArgumentAttrTypes} - - if len(apiObjects) == 0 { - return types.ListNull(elemType), diags - } - - elems := []attr.Value{} - for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - obj := map[string]attr.Value{ - "nested_required": flex.StringValueToFramework(ctx, apiObject.NestedRequired), - "nested_optional": flex.StringValueToFramework(ctx, apiObject.NestedOptional), - } - objVal, d := types.ObjectValue(complexArgumentAttrTypes, obj) - diags.Append(d...) 
- - elems = append(elems, objVal) - } - - listVal, d := types.ListValue(elemType, elems) - diags.Append(d...) - - return listVal, diags -} - -// TIP: Remember, as mentioned above, expanders take a Terraform data structure -// and return something that you can send to the AWS API. In other words, -// expanders translate from Terraform -> AWS. -// -// See more: -// https://hashicorp.github.io/terraform-provider-aws/data-handling-and-conversion/ -func expandComplexArgument(tfList []complexArgumentData) *awstypes.ComplexArgument { - if len(tfList) == 0 { - return nil - } - - tfObj := tfList[0] - apiObject := &awstypes.ComplexArgument{ - NestedRequired: aws.String(tfObj.NestedRequired.ValueString()), - } - if !tfObj.NestedOptional.IsNull() { - apiObject.NestedOptional = aws.String(tfObj.NestedOptional.ValueString()) - } - - return apiObject -} - -// TIP: Even when you have a list with max length of 1, this plural function -// works brilliantly. However, if the AWS API takes a structure rather than a -// slice of structures, you will not need it. -func expandComplexArguments(tfList []complexArgumentData) []*bedrockagent.ComplexArgument { - // TIP: The AWS API can be picky about whether you send a nil or zero- - // length for an argument that should be cleared. For example, in some - // cases, if you send a nil value, the AWS API interprets that as "make no - // changes" when what you want to say is "remove everything." Sometimes - // using a zero-length list will cause an error. - // - // As a result, here are two options. Usually, option 1, nil, will work as - // expected, clearing the field. But, test going from something to nothing - // to make sure it works. If not, try the second option. - // TIP: Option 1: Returning nil for zero-length list - if len(tfList) == 0 { - return nil - } - var apiObject []*awstypes.ComplexArgument - // TIP: Option 2: Return zero-length list for zero-length list. 
If option 1 does - // not work, after testing going from something to nothing (if that is - // possible), uncomment out the next line and remove option 1. - // - // apiObject := make([]*bedrockagent.ComplexArgument, 0) - - for _, tfObj := range tfList { - item := &bedrockagent.ComplexArgument{ - NestedRequired: aws.String(tfObj.NestedRequired.ValueString()), - } - if !tfObj.NestedOptional.IsNull() { - item.NestedOptional = aws.String(tfObj.NestedOptional.ValueString()) - } - - apiObject = append(apiObject, item) - } - - return apiObject -} - type dataSourceResourceModel struct { CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` DataDeletionPolicy types.String `tfsdk:"data_deletion_policy"` @@ -779,7 +458,7 @@ type dataSourceResourceModel struct { KnowledgeBaseID types.String `tfsdk:"knowledge_base_id"` Name types.String `tfsdk:"name"` ServerSideEncryptionConfiguration fwtypes.ListNestedObjectValueOf[serverSideEncryptionConfigurationModel] `tfsdk:"server_side_encryption_configuration"` - VectorIngestionConfiguration fwtypes.ListNestedObjectValueOf[vectorIngestionConfigurationModel] `tfsdk:"storage_configuration"` + VectorIngestionConfiguration fwtypes.ListNestedObjectValueOf[vectorIngestionConfigurationModel] `tfsdk:"vector_ingestion_configuration"` Tags types.Map `tfsdk:"tags"` TagsAll types.Map `tfsdk:"tags_all"` Timeouts timeouts.Value `tfsdk:"timeouts"` @@ -801,7 +480,7 @@ type serverSideEncryptionConfigurationModel struct { KmsKeyArn types.String `tfsdk:"kms_key_arn"` } -type vectorIngestionConfigurationModell struct { +type vectorIngestionConfigurationModel struct { ChunkingConfiguration fwtypes.ListNestedObjectValueOf[chunkingConfigurationModel] `tfsdk:"chunking_Configuration"` } diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index 625bc56b811..fe7520989f1 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -3,161 +3,33 @@ 
package bedrockagent_test -// **PLEASE DELETE THIS AND ALL TIP COMMENTS BEFORE SUBMITTING A PR FOR REVIEW!** -// -// TIP: ==== INTRODUCTION ==== -// Thank you for trying the skaff tool! -// -// You have opted to include these helpful comments. They all include "TIP:" -// to help you find and remove them when you're done with them. -// -// While some aspects of this file are customized to your input, the -// scaffold tool does *not* look at the AWS API and ensure it has correct -// function, structure, and variable names. It makes guesses based on -// commonalities. You will need to make significant adjustments. -// -// In other words, as generated, this is a rough outline of the work you will -// need to do. If something doesn't make sense for your situation, get rid of -// it. - import ( - // TIP: ==== IMPORTS ==== - // This is a common set of imports but not customized to your code since - // your code hasn't been written yet. Make sure you, your IDE, or - // goimports -w fixes these imports. - // - // The provider linter wants your imports to be in two groups: first, - // standard library (i.e., "fmt" or "strings"), second, everything else. - // - // Also, AWS Go SDK v2 may handle nested structures differently than v1, - // using the services/bedrockagent/types package. If so, you'll - // need to import types and reference the nested types, e.g., as - // types.. 
"context" - "errors" "fmt" "testing" - "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/bedrockagent" "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/internal/errs" - "github.com/hashicorp/terraform-provider-aws/names" - - // TIP: You will often need to import the package that this test file lives - // in. Since it is in the "test" context, it must import the package to use - // any normal context constants, variables, or functions. tfbedrockagent "github.com/hashicorp/terraform-provider-aws/internal/service/bedrockagent" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) -// TIP: File Structure. The basic outline for all test files should be as -// follows. Improve this resource's maintainability by following this -// outline. -// -// 1. Package declaration (add "_test" since this is a test file) -// 2. Imports -// 3. Unit tests -// 4. Basic test -// 5. Disappears test -// 6. All the other tests -// 7. Helper functions (exists, destroy, check, etc.) -// 8. Functions that return Terraform configurations - -// TIP: ==== UNIT TESTS ==== -// This is an example of a unit test. Its name is not prefixed with -// "TestAcc" like an acceptance test. -// -// Unlike acceptance tests, unit tests do not access AWS and are focused on a -// function (or method). Because of this, they are quick and cheap to run. 
-// -// In designing a resource's implementation, isolate complex bits from AWS bits -// so that they can be tested through a unit test. We encourage more unit tests -// in the provider. -// -// Cut and dry functions using well-used patterns, like typical flatteners and -// expanders, don't need unit testing. However, if they are complex or -// intricate, they should be unit tested. -func TestDataSourceExampleUnitTest(t *testing.T) { - t.Parallel() - - testCases := []struct { - TestName string - Input string - Expected string - Error bool - }{ - { - TestName: "empty", - Input: "", - Expected: "", - Error: true, - }, - { - TestName: "descriptive name", - Input: "some input", - Expected: "some output", - Error: false, - }, - { - TestName: "another descriptive name", - Input: "more input", - Expected: "more output", - Error: false, - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.TestName, func(t *testing.T) { - t.Parallel() - got, err := tfbedrockagent.FunctionFromResource(testCase.Input) - - if err != nil && !testCase.Error { - t.Errorf("got error (%s), expected no error", err) - } - - if err == nil && testCase.Error { - t.Errorf("got (%s) and no error, expected error", got) - } - - if got != testCase.Expected { - t.Errorf("got %s, expected %s", got, testCase.Expected) - } - }) - } -} - -// TIP: ==== ACCEPTANCE TESTS ==== -// This is an example of a basic acceptance test. This should test as much of -// standard functionality of the resource as possible, and test importing, if -// applicable. We prefix its name with "TestAcc", the service, and the -// resource name. -// -// Acceptance test access AWS and cost money to run. 
-func TestAccBedrockAgentDataSource_basic(t *testing.T) { +// Prerequisites: +// * psql run via null_resource/provisioner "local-exec" +func testAccDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - // TIP: This is a long-running test guard for tests that run longer than - // 300s (5 min) generally. - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var datasource bedrockagent.DescribeDataSourceResponse + var dataSource types.DataSource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrockagent_data_source.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.BedrockAgentEndpointID) - testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -166,58 +38,36 @@ func TestAccBedrockAgentDataSource_basic(t *testing.T) { { Config: testAccDataSourceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceExists(ctx, resourceName, &datasource), - resource.TestCheckResourceAttr(resourceName, "auto_minor_version_upgrade", "false"), - resource.TestCheckResourceAttrSet(resourceName, "maintenance_window_start_time.0.day_of_week"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user.*", map[string]string{ - "console_access": "false", - "groups.#": "0", - "username": "Test", - "password": "TestTest1234", - }), - acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "bedrockagent", regexache.MustCompile(`datasource:+.`)), + testAccCheckDataSourceBaseExists(ctx, resourceName, &dataSource), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately", "user"}, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, }, }) } -func 
TestAccBedrockAgentDataSource_disappears(t *testing.T) { +func testAccDataSource_disappears(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var datasource bedrockagent.DescribeDataSourceResponse + var dataSource types.DataSource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrockagent_data_source.test" - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, names.BedrockAgentEndpointID) - testAccPreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataSourceConfig_basic(rName, testAccDataSourceVersionNewer), + Config: testAccDataSourceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceExists(ctx, resourceName, &datasource), - // TIP: The Plugin-Framework disappears helper is similar to the Plugin-SDK version, - // but expects a new resource factory function as the third argument. 
To expose this - // private function to the testing package, you may need to add a line like the following - // to exports_test.go: - // - // var ResourceDataSource = newResourceDataSource + testAccCheckDataSourceBaseExists(ctx, resourceName, &dataSource), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfbedrockagent.ResourceDataSource, resourceName), ), ExpectNonEmptyPlan: true, @@ -235,99 +85,56 @@ func testAccCheckDataSourceDestroy(ctx context.Context) resource.TestCheckFunc { continue } - input := &bedrockagent.DescribeDataSourceInput{ - DataSourceId: aws.String(rs.Primary.ID), - } - _, err := conn.DescribeDataSource(ctx, &bedrockagent.DescribeDataSourceInput{ - DataSourceId: aws.String(rs.Primary.ID), - }) - if errs.IsA[*types.ResourceNotFoundException](err) { - return nil + _, err := tfbedrockagent.FindDataSourceByID(ctx, conn, rs.Primary.ID, rs.Primary.Attributes["knowledge_base_id"]) + + if tfresource.NotFound(err) { + continue } + if err != nil { - return create.Error(names.BedrockAgent, create.ErrActionCheckingDestroyed, tfbedrockagent.ResNameDataSource, rs.Primary.ID, err) + return err } - return create.Error(names.BedrockAgent, create.ErrActionCheckingDestroyed, tfbedrockagent.ResNameDataSource, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("Bedrock Agent Data Source %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckDataSourceExists(ctx context.Context, name string, datasource *bedrockagent.DescribeDataSourceResponse) resource.TestCheckFunc { +func testAccCheckDataSourceBaseExists(ctx context.Context, n string, v *types.DataSource) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.BedrockAgent, create.ErrActionCheckingExistence, tfbedrockagent.ResNameDataSource, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return create.Error(names.BedrockAgent, 
create.ErrActionCheckingExistence, tfbedrockagent.ResNameDataSource, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentClient(ctx) - resp, err := conn.DescribeDataSource(ctx, &bedrockagent.DescribeDataSourceInput{ - DataSourceId: aws.String(rs.Primary.ID), - }) + + output, err := tfbedrockagent.FindDataSourceByID(ctx, conn, rs.Primary.ID, rs.Primary.Attributes["knowledge_base_id"]) if err != nil { - return create.Error(names.BedrockAgent, create.ErrActionCheckingExistence, tfbedrockagent.ResNameDataSource, rs.Primary.ID, err) + return err } - *datasource = *resp + *v = *output return nil } } -func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentClient(ctx) - - input := &bedrockagent.ListDataSourcesInput{} - _, err := conn.ListDataSources(ctx, input) - - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} - -func testAccCheckDataSourceNotRecreated(before, after *bedrockagent.DescribeDataSourceResponse) resource.TestCheckFunc { - return func(s *terraform.State) error { - if before, after := aws.ToString(before.DataSourceId), aws.ToString(after.DataSourceId); before != after { - return create.Error(names.BedrockAgent, create.ErrActionCheckingNotRecreated, tfbedrockagent.ResNameDataSource, aws.ToString(before.DataSourceId), errors.New("recreated")) - } - - return nil - } -} - -func testAccDataSourceConfig_basic(rName, version string) string { +func testAccDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` -resource "aws_security_group" "test" { - name = %[1]q -} - resource "aws_bedrockagent_data_source" "test" { - data_source_name = %[1]q - engine_type = "ActiveBedrockAgent" - engine_version = %[2]q - host_instance_type = "bedrockagent.t2.micro" - security_groups = 
[aws_security_group.test.id] - authentication_strategy = "simple" - storage_type = "efs" + name = %[1]q + knowledge_base_id = "kb_id" - logs { - general = true - } - - user { - username = "Test" - password = "TestTest1234" + data_source_configuration { + type = "S3" + s3_configuration { + bucket_arn = "bucket_arn" + } } } -`, rName, version) +`, rName) } diff --git a/internal/service/bedrockagent/exports_test.go b/internal/service/bedrockagent/exports_test.go index a13d3c2a991..fa431e71051 100644 --- a/internal/service/bedrockagent/exports_test.go +++ b/internal/service/bedrockagent/exports_test.go @@ -9,9 +9,11 @@ var ( ResourceAgentActionGroup = newAgentActionGroupResource ResourceAgentAlias = newAgentAliasResource ResourceKnowledgeBase = newKnowledgeBaseResource + ResourceDataSource = newDataSourceResource FindAgentActionGroupByThreePartKey = findAgentActionGroupByThreePartKey FindAgentAliasByTwoPartKey = findAgentAliasByTwoPartKey FindAgentByID = findAgentByID FindKnowledgeBaseByID = findKnowledgeBaseByID + FindDataSourceByID = findDataSourceByID ) From c479acb25459c7816f5c2044b23ac81952cac0a2 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Mon, 29 Apr 2024 00:09:31 +0100 Subject: [PATCH 03/15] Adding first test --- .../service/bedrockagent/bedrockagent_test.go | 4 ++ internal/service/bedrockagent/data_source.go | 40 ++++++++++++------- .../service/bedrockagent/data_source_test.go | 11 +++-- .../bedrockagent/service_package_gen.go | 4 ++ 4 files changed, 39 insertions(+), 20 deletions(-) diff --git a/internal/service/bedrockagent/bedrockagent_test.go b/internal/service/bedrockagent/bedrockagent_test.go index c3938f7a64f..d663842e4b2 100644 --- a/internal/service/bedrockagent/bedrockagent_test.go +++ b/internal/service/bedrockagent/bedrockagent_test.go @@ -20,6 +20,10 @@ func TestAccBedrockAgent_serial(t *testing.T) { "basicOpenSearch": testAccKnowledgeBase_basicOpenSearch, "updateOpenSearch": testAccKnowledgeBase_updateOpenSearch, }, + "DataSource": { + // 
"basic": testAccDataSource_basic, + "disappears": testAccDataSource_disappears, + }, } acctest.RunSerialTests2Levels(t, testCases, 0) diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index 6ff5be8f221..aac1d372bad 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -60,12 +60,28 @@ func (r *dataSourceResource) Metadata(_ context.Context, req resource.MetadataRe func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ + "created_at": schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, "data_deletion_policy": schema.StringAttribute{ Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, "description": schema.StringAttribute{ Optional: true, }, + "failure_reasons": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + }, "id": framework.IDAttribute(), "knowledge_base_id": schema.StringAttribute{ Required: true, @@ -76,20 +92,22 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ stringplanmodifier.RequiresReplace(), }, }, + "updated_at": schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + }, }, Blocks: map[string]schema.Block{ "server_side_encryption_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[serverSideEncryptionConfigurationModel](ctx), Validators: []validator.List{ - listvalidator.IsRequired(), - listvalidator.SizeAtLeast(1), listvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "kms_key_arn": 
schema.StringAttribute{ CustomType: fwtypes.ARNType, - Required: true, + Optional: true, }, }, }, @@ -111,8 +129,6 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ "s3_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[s3ConfigurationModel](ctx), Validators: []validator.List{ - listvalidator.IsRequired(), - listvalidator.SizeAtLeast(1), listvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ @@ -122,7 +138,7 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ Required: true, }, "bucket_owner_account_id": schema.StringAttribute{ - Required: true, + Optional: true, }, "inclusion_prefixes": schema.SetAttribute{ CustomType: fwtypes.SetOfStringType, @@ -138,8 +154,6 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ "vector_ingestion_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[vectorIngestionConfigurationModel](ctx), Validators: []validator.List{ - listvalidator.IsRequired(), - listvalidator.SizeAtLeast(1), listvalidator.SizeAtMost(1), }, NestedObject: schema.NestedBlockObject{ @@ -424,17 +438,17 @@ func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id, kbID s } func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id, kbID string) (*awstypes.DataSource, error) { - in := &bedrockagent.GetDataSourceInput{ + input := &bedrockagent.GetDataSourceInput{ DataSourceId: aws.String(id), KnowledgeBaseId: aws.String(kbID), } - out, err := conn.GetDataSource(ctx, in) + out, err := conn.GetDataSource(ctx, input) if err != nil { if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, - LastRequest: in, + LastRequest: input, } } @@ -442,7 +456,7 @@ func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id, kbID } if out == nil || out.DataSource == nil { - return nil, 
tfresource.NewEmptyResultError(in) + return nil, tfresource.NewEmptyResultError(input) } return out.DataSource, nil @@ -459,8 +473,6 @@ type dataSourceResourceModel struct { Name types.String `tfsdk:"name"` ServerSideEncryptionConfiguration fwtypes.ListNestedObjectValueOf[serverSideEncryptionConfigurationModel] `tfsdk:"server_side_encryption_configuration"` VectorIngestionConfiguration fwtypes.ListNestedObjectValueOf[vectorIngestionConfigurationModel] `tfsdk:"vector_ingestion_configuration"` - Tags types.Map `tfsdk:"tags"` - TagsAll types.Map `tfsdk:"tags_all"` Timeouts timeouts.Value `tfsdk:"timeouts"` UpdatedAt timetypes.RFC3339 `tfsdk:"updated_at"` } diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index fe7520989f1..233dce6f39f 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -19,9 +19,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// Prerequisites: -// * psql run via null_resource/provisioner "local-exec" -func testAccDataSource_basic(t *testing.T) { +func TestAccDataSource_basic(t *testing.T) { ctx := acctest.Context(t) var dataSource types.DataSource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -126,13 +124,14 @@ func testAccCheckDataSourceBaseExists(ctx context.Context, n string, v *types.Da func testAccDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_bedrockagent_data_source" "test" { - name = %[1]q - knowledge_base_id = "kb_id" + name = %[1]q + knowledge_base_id = "RFYQS34LF7" + data_deletion_policy = "RETAIN" data_source_configuration { type = "S3" s3_configuration { - bucket_arn = "bucket_arn" + bucket_arn = "arn:aws:s3:::aws-security-data-lake-eu-west-1-8rvl0sowjqqdgyw4nhwlqpaimqddah" } } } diff --git a/internal/service/bedrockagent/service_package_gen.go b/internal/service/bedrockagent/service_package_gen.go index 5c317d31b56..74418ac11b4 
100644 --- a/internal/service/bedrockagent/service_package_gen.go +++ b/internal/service/bedrockagent/service_package_gen.go @@ -38,6 +38,10 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic IdentifierAttribute: "agent_arn", }, }, + { + Factory: newDataSourceResource, + Name: "Data Source", + }, { Factory: newKnowledgeBaseResource, Name: "Knowledge Base", From b5e3ba2254db15b54e5bd65163e8de13cc91dc84 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Mon, 29 Apr 2024 14:59:07 +0100 Subject: [PATCH 04/15] First test passed succesfully --- internal/service/bedrockagent/data_source.go | 72 +++++++++++++++---- .../service/bedrockagent/data_source_test.go | 18 ++--- 2 files changed, 67 insertions(+), 23 deletions(-) diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index aac1d372bad..7627c2d5615 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -26,6 +26,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/framework" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" @@ -67,6 +68,9 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ stringplanmodifier.UseStateForUnknown(), }, }, + "data_source_id": schema.StringAttribute{ + Computed: true, + }, "data_deletion_policy": schema.StringAttribute{ Optional: true, Computed: true, @@ -230,8 +234,16 @@ func (r *dataSourceResource) Create(ctx context.Context, request resource.Create ds := outputRaw.(*bedrockagent.CreateDataSourceOutput).DataSource data.DataSourceID = 
fwflex.StringToFramework(ctx, ds.DataSourceId) data.KnowledgeBaseID = fwflex.StringToFramework(ctx, ds.KnowledgeBaseId) + data.setID() + + parts, err := flex.ExpandResourceId(data.ID.ValueString(), dataSourceResourceIdPartCount, false) + if err != nil { + response.Diagnostics.AddError("Creating Bedrock Agent Data Source", err.Error()) + + return + } - ds, err = waitDataSourceCreated(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + ds, err = waitDataSourceCreated(ctx, conn, parts[0], parts[1], r.CreateTimeout(ctx, data.Timeouts)) if err != nil { response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) create", data.DataSourceID.ValueString()), err.Error()) @@ -256,7 +268,14 @@ func (r *dataSourceResource) Read(ctx context.Context, request resource.ReadRequ conn := r.Meta().BedrockAgentClient(ctx) - ds, err := findDataSourceByID(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString()) + parts, err := flex.ExpandResourceId(data.ID.ValueString(), dataSourceResourceIdPartCount, false) + if err != nil { + response.Diagnostics.AddError("Reading Bedrock Agent Data Source", err.Error()) + + return + } + + ds, err := findDataSourceByID(ctx, conn, parts[0], parts[1]) if tfresource.NotFound(err) { response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) @@ -315,7 +334,14 @@ func (r *dataSourceResource) Update(ctx context.Context, request resource.Update return } - ds, err := waitDataSourceUpdated(ctx, conn, new.DataSourceID.ValueString(), new.KnowledgeBaseID.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)) + parts, err := flex.ExpandResourceId(new.ID.ValueString(), dataSourceResourceIdPartCount, false) + if err != nil { + response.Diagnostics.AddError("Updating Bedrock Agent Data Source", err.Error()) + + return + } + + ds, err := waitDataSourceUpdated(ctx, conn, parts[0], parts[1], r.UpdateTimeout(ctx, new.Timeouts)) if err != nil { 
response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) update", new.DataSourceID.ValueString()), err.Error()) @@ -357,7 +383,14 @@ func (r *dataSourceResource) Delete(ctx context.Context, request resource.Delete return } - _, err = waitDataSourceDeleted(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)) + parts, err := flex.ExpandResourceId(data.ID.ValueString(), dataSourceResourceIdPartCount, false) + if err != nil { + response.Diagnostics.AddError("Deleting Bedrock Agent Data Source", err.Error()) + + return + } + + _, err = waitDataSourceDeleted(ctx, conn, parts[0], parts[1], r.DeleteTimeout(ctx, data.Timeouts)) if err != nil { response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Knowledge Base (%s) delete", data.KnowledgeBaseID.ValueString()), err.Error()) @@ -370,11 +403,11 @@ func (r *dataSourceResource) ImportState(ctx context.Context, req resource.Impor resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } -func waitDataSourceCreated(ctx context.Context, conn *bedrockagent.Client, id, kbID string, timeout time.Duration) (*awstypes.DataSource, error) { +func waitDataSourceCreated(ctx context.Context, conn *bedrockagent.Client, id, kbId string, timeout time.Duration) (*awstypes.DataSource, error) { stateConf := &retry.StateChangeConf{ Pending: []string{}, Target: enum.Slice(awstypes.DataSourceStatusAvailable), - Refresh: statusDataSource(ctx, conn, id, kbID), + Refresh: statusDataSource(ctx, conn, id, kbId), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, @@ -388,11 +421,11 @@ func waitDataSourceCreated(ctx context.Context, conn *bedrockagent.Client, id, k return nil, err } -func waitDataSourceUpdated(ctx context.Context, conn *bedrockagent.Client, id, kbID string, timeout time.Duration) (*awstypes.DataSource, error) { +func waitDataSourceUpdated(ctx context.Context, conn *bedrockagent.Client, id, kbId string, 
timeout time.Duration) (*awstypes.DataSource, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.DataSourceStatusAvailable), Target: enum.Slice(awstypes.DataSourceStatusAvailable), - Refresh: statusDataSource(ctx, conn, id, kbID), + Refresh: statusDataSource(ctx, conn, id, kbId), Timeout: timeout, NotFoundChecks: 20, ContinuousTargetOccurence: 2, @@ -406,11 +439,11 @@ func waitDataSourceUpdated(ctx context.Context, conn *bedrockagent.Client, id, k return nil, err } -func waitDataSourceDeleted(ctx context.Context, conn *bedrockagent.Client, id, kbID string, timeout time.Duration) (*awstypes.DataSource, error) { +func waitDataSourceDeleted(ctx context.Context, conn *bedrockagent.Client, id, kbId string, timeout time.Duration) (*awstypes.DataSource, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.DataSourceStatusDeleting), Target: []string{}, - Refresh: statusDataSource(ctx, conn, id, kbID), + Refresh: statusDataSource(ctx, conn, id, kbId), Timeout: timeout, } @@ -422,9 +455,9 @@ func waitDataSourceDeleted(ctx context.Context, conn *bedrockagent.Client, id, k return nil, err } -func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id, kbID string) retry.StateRefreshFunc { +func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id, kbId string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := findDataSourceByID(ctx, conn, id, kbID) + output, err := findDataSourceByID(ctx, conn, id, kbId) if tfresource.NotFound(err) { return nil, "", nil } @@ -437,10 +470,10 @@ func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id, kbID s } } -func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id, kbID string) (*awstypes.DataSource, error) { +func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id, kbId string) (*awstypes.DataSource, error) { input := &bedrockagent.GetDataSourceInput{ DataSourceId: 
aws.String(id), - KnowledgeBaseId: aws.String(kbID), + KnowledgeBaseId: aws.String(kbId), } out, err := conn.GetDataSource(ctx, input) @@ -466,7 +499,8 @@ type dataSourceResourceModel struct { CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` DataDeletionPolicy types.String `tfsdk:"data_deletion_policy"` DataSourceConfiguration fwtypes.ListNestedObjectValueOf[dataSourceConfigurationModel] `tfsdk:"data_source_configuration"` - DataSourceID types.String `tfsdk:"id"` + DataSourceID types.String `tfsdk:"data_source_id"` + ID types.String `tfsdk:"id"` Description types.String `tfsdk:"description"` FailureReasons fwtypes.ListValueOf[types.String] `tfsdk:"failure_reasons"` KnowledgeBaseID types.String `tfsdk:"knowledge_base_id"` @@ -477,6 +511,14 @@ type dataSourceResourceModel struct { UpdatedAt timetypes.RFC3339 `tfsdk:"updated_at"` } +const ( + dataSourceResourceIdPartCount = 2 +) + +func (data *dataSourceResourceModel) setID() { + data.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString()}, dataSourceResourceIdPartCount, false))) +} + type dataSourceConfigurationModel struct { Type types.String `tfsdk:"type"` S3Configuration fwtypes.ListNestedObjectValueOf[s3ConfigurationModel] `tfsdk:"s3_configuration"` diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index 233dce6f39f..5f2bc901320 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -6,6 +6,7 @@ package bedrockagent_test import ( "context" "fmt" + "strings" "testing" "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" @@ -82,8 +83,8 @@ func testAccCheckDataSourceDestroy(ctx context.Context) resource.TestCheckFunc { if rs.Type != "aws_bedrockagent_data_source" { continue } - - _, err := tfbedrockagent.FindDataSourceByID(ctx, conn, rs.Primary.ID, rs.Primary.Attributes["knowledge_base_id"]) + parts := 
strings.Split(rs.Primary.ID, ",") + _, err := tfbedrockagent.FindDataSourceByID(ctx, conn, parts[0], parts[1]) if tfresource.NotFound(err) { continue @@ -109,7 +110,8 @@ func testAccCheckDataSourceBaseExists(ctx context.Context, n string, v *types.Da conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentClient(ctx) - output, err := tfbedrockagent.FindDataSourceByID(ctx, conn, rs.Primary.ID, rs.Primary.Attributes["knowledge_base_id"]) + parts := strings.Split(rs.Primary.ID, ",") + output, err := tfbedrockagent.FindDataSourceByID(ctx, conn, parts[0], parts[1]) if err != nil { return err @@ -124,16 +126,16 @@ func testAccCheckDataSourceBaseExists(ctx context.Context, n string, v *types.Da func testAccDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_bedrockagent_data_source" "test" { - name = %[1]q - knowledge_base_id = "RFYQS34LF7" - data_deletion_policy = "RETAIN" +name = %[1]q +knowledge_base_id = "RFYQS34LF7" +data_deletion_policy = "RETAIN" - data_source_configuration { +data_source_configuration { type = "S3" s3_configuration { bucket_arn = "arn:aws:s3:::aws-security-data-lake-eu-west-1-8rvl0sowjqqdgyw4nhwlqpaimqddah" } - } + } } `, rName) } From f403bd6d3725035d882c649a838f0a3da33a46b9 Mon Sep 17 00:00:00 2001 From: markos kandylis Date: Mon, 29 Apr 2024 15:38:23 +0100 Subject: [PATCH 05/15] First tests run succesfully need to update on end to end tests --- .../service/bedrockagent/bedrockagent_test.go | 3 +- internal/service/bedrockagent/data_source.go | 14 ++-- .../service/bedrockagent/data_source_test.go | 77 ++++++++++++++++--- 3 files changed, 75 insertions(+), 19 deletions(-) diff --git a/internal/service/bedrockagent/bedrockagent_test.go b/internal/service/bedrockagent/bedrockagent_test.go index d663842e4b2..ac69a9db24e 100644 --- a/internal/service/bedrockagent/bedrockagent_test.go +++ b/internal/service/bedrockagent/bedrockagent_test.go @@ -21,8 +21,9 @@ func TestAccBedrockAgent_serial(t *testing.T) { 
"updateOpenSearch": testAccKnowledgeBase_updateOpenSearch, }, "DataSource": { - // "basic": testAccDataSource_basic, + "basic": testAccDataSource_basic, "disappears": testAccDataSource_disappears, + "full": testAccDataSource_full, }, } diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index 7627c2d5615..86215764ab0 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -181,11 +181,11 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ }, NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ - "max_tokens": schema.NumberAttribute{ - Optional: true, + "max_tokens": schema.Int64Attribute{ + Required: true, }, - "overlap_percentage": schema.NumberAttribute{ - Optional: true, + "overlap_percentage": schema.Int64Attribute{ + Required: true, }, }, }, @@ -535,7 +535,7 @@ type serverSideEncryptionConfigurationModel struct { } type vectorIngestionConfigurationModel struct { - ChunkingConfiguration fwtypes.ListNestedObjectValueOf[chunkingConfigurationModel] `tfsdk:"chunking_Configuration"` + ChunkingConfiguration fwtypes.ListNestedObjectValueOf[chunkingConfigurationModel] `tfsdk:"chunking_configuration"` } type chunkingConfigurationModel struct { @@ -544,6 +544,6 @@ type chunkingConfigurationModel struct { } type fixedSizeChunkingConfigurationModel struct { - MaxTokens types.Number `tfsdk:"max_tokens"` - OverlapPercentage types.Number `tfsdk:"overlap_percentage"` + MaxTokens types.Int64 `tfsdk:"max_tokens"` + OverlapPercentage types.Int64 `tfsdk:"overlap_percentage"` } diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index 5f2bc901320..363be4edfbe 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -20,7 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" 
) -func TestAccDataSource_basic(t *testing.T) { +func testAccDataSource_basic(t *testing.T) { ctx := acctest.Context(t) var dataSource types.DataSource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -49,6 +49,35 @@ func TestAccDataSource_basic(t *testing.T) { }) } +func testAccDataSource_full(t *testing.T) { + ctx := acctest.Context(t) + var dataSource types.DataSource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagent_data_source.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDataSourceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceConfig_full(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataSourceBaseExists(ctx, resourceName, &dataSource), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccDataSource_disappears(t *testing.T) { ctx := acctest.Context(t) var dataSource types.DataSource @@ -126,16 +155,42 @@ func testAccCheckDataSourceBaseExists(ctx context.Context, n string, v *types.Da func testAccDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_bedrockagent_data_source" "test" { -name = %[1]q -knowledge_base_id = "RFYQS34LF7" -data_deletion_policy = "RETAIN" - -data_source_configuration { - type = "S3" - s3_configuration { - bucket_arn = "arn:aws:s3:::aws-security-data-lake-eu-west-1-8rvl0sowjqqdgyw4nhwlqpaimqddah" - } - } + name = %[1]q + knowledge_base_id = "xxxx" + data_deletion_policy = "RETAIN" + + data_source_configuration { + type = "S3" + s3_configuration { + bucket_arn = "arn:aws:xxxx" + } + } +} +`, rName) +} + +func testAccDataSourceConfig_full(rName string) string { + return fmt.Sprintf(` +resource "aws_bedrockagent_data_source" "test" 
{
+  name                 = %[1]q
+  knowledge_base_id    = "xxx"
+  data_deletion_policy = "RETAIN"
+
+  data_source_configuration {
+    type = "S3"
+    s3_configuration {
+      bucket_arn = "arn:aws:s3:::xxxxx"
+    }
+  }
+  vector_ingestion_configuration {
+    chunking_configuration {
+      chunking_strategy = "FIXED_SIZE"
+      fixed_size_chunking_configuration {
+        max_tokens         = 3
+        overlap_percentage = 80
+      }
+    }
+  }
}
`, rName)
}

From 030d6f0ee714663754bf94a7dc03d13b5fd9bf44 Mon Sep 17 00:00:00 2001
From: markos kandylis <markos.kandylis@checkout.com>
Date: Mon, 29 Apr 2024 15:43:14 +0100
Subject: [PATCH 06/15] Added changelog

---
 .changelog/37158.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/37158.txt

diff --git a/.changelog/37158.txt b/.changelog/37158.txt
new file mode 100644
index 00000000000..5c1969082a1
--- /dev/null
+++ b/.changelog/37158.txt
@@ -0,0 +1,3 @@
+```release-note:new-resource
+aws_bedrockagent_data_source
+```
\ No newline at end of file

From 1642ae385e83d7f7e664c6f59790d97b126b75a2 Mon Sep 17 00:00:00 2001
From: Kit Ewbank <Kit_Ewbank@hotmail.com>
Date: Thu, 2 May 2024 15:08:50 -0400
Subject: [PATCH 07/15] r/aws_bedrockagent_data_source: Tidy up.
--- internal/service/bedrockagent/data_source.go | 354 ++++++++---------- .../service/bedrockagent/data_source_test.go | 16 +- internal/service/bedrockagent/exports_test.go | 2 +- .../r/bedrockagent_data_source.html.markdown | 14 +- 4 files changed, 160 insertions(+), 226 deletions(-) diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index 86215764ab0..26b8b08a7f8 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -5,16 +5,18 @@ package bedrockagent import ( "context" + "errors" "fmt" "time" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/bedrockagent" awstypes "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" - "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" - "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" @@ -30,16 +32,17 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework" fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) -// Function 
annotations are used for resource registration to the Provider. DO NOT EDIT. // @FrameworkResource(name="Data Source") func newDataSourceResource(_ context.Context) (resource.ResourceWithConfigure, error) { r := &dataSourceResource{} r.SetDefaultCreateTimeout(30 * time.Minute) - r.SetDefaultUpdateTimeout(30 * time.Minute) r.SetDefaultDeleteTimeout(30 * time.Minute) return r, nil @@ -51,18 +54,20 @@ const ( type dataSourceResource struct { framework.ResourceWithConfigure + framework.WithImportByID framework.WithTimeouts } -func (r *dataSourceResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = "aws_bedrockagent_data_source" +func (*dataSourceResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_bedrockagent_data_source" } -func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *dataSourceResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ - "created_at": schema.StringAttribute{ - CustomType: timetypes.RFC3339Type{}, + "data_deletion_policy": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.DataDeletionPolicy](), + Optional: true, Computed: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.UseStateForUnknown(), @@ -71,22 +76,13 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ "data_source_id": schema.StringAttribute{ Computed: true, }, - "data_deletion_policy": schema.StringAttribute{ - Optional: true, - Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, - }, "description": schema.StringAttribute{ Optional: true, + Validators: []validator.String{ + 
stringvalidator.LengthBetween(1, 200), + }, }, - "failure_reasons": schema.ListAttribute{ - CustomType: fwtypes.ListOfStringType, - ElementType: types.StringType, - Computed: true, - }, - "id": framework.IDAttribute(), + names.AttrID: framework.IDAttribute(), "knowledge_base_id": schema.StringAttribute{ Required: true, }, @@ -95,27 +91,12 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, - }, - "updated_at": schema.StringAttribute{ - CustomType: timetypes.RFC3339Type{}, - Computed: true, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexache.MustCompile(`^([0-9a-zA-Z][_-]?){1,100}$`), "valid characters are a-z, A-Z, 0-9, _ (underscore) and - (hyphen). The name can have up to 100 characters"), + }, }, }, Blocks: map[string]schema.Block{ - "server_side_encryption_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[serverSideEncryptionConfigurationModel](ctx), - Validators: []validator.List{ - listvalidator.SizeAtMost(1), - }, - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "kms_key_arn": schema.StringAttribute{ - CustomType: fwtypes.ARNType, - Optional: true, - }, - }, - }, - }, "data_source_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[dataSourceConfigurationModel](ctx), Validators: []validator.List{ @@ -126,12 +107,13 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "type": schema.StringAttribute{ - Required: true, + CustomType: fwtypes.StringEnumType[awstypes.DataSourceType](), + Required: true, }, }, Blocks: map[string]schema.Block{ "s3_configuration": schema.ListNestedBlock{ - CustomType: fwtypes.NewListNestedObjectTypeOf[s3ConfigurationModel](ctx), + CustomType: 
fwtypes.NewListNestedObjectTypeOf[s3DataSourceConfigurationModel](ctx), Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -143,6 +125,9 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ }, "bucket_owner_account_id": schema.StringAttribute{ Optional: true, + Validators: []validator.String{ + fwvalidators.AWSAccountID(), + }, }, "inclusion_prefixes": schema.SetAttribute{ CustomType: fwtypes.SetOfStringType, @@ -155,6 +140,24 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ }, }, }, + "server_side_encryption_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[serverSideEncryptionConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "kms_key_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + }, + }, + }, + }, + "timeouts": timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Delete: true, + }), "vector_ingestion_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[vectorIngestionConfigurationModel](ctx), Validators: []validator.List{ @@ -170,7 +173,8 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "chunking_strategy": schema.StringAttribute{ - Required: true, + CustomType: fwtypes.StringEnumType[awstypes.ChunkingStrategy](), + Required: true, }, }, Blocks: map[string]schema.Block{ @@ -183,9 +187,15 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ Attributes: map[string]schema.Attribute{ "max_tokens": schema.Int64Attribute{ Required: true, + Validators: []validator.Int64{ + int64validator.AtLeast(1), + }, }, "overlap_percentage": schema.Int64Attribute{ Required: true, + Validators: []validator.Int64{ + 
int64validator.Between(1, 99), + }, }, }, }, @@ -196,11 +206,6 @@ func (r *dataSourceResource) Schema(ctx context.Context, req resource.SchemaRequ }, }, }, - "timeouts": timeouts.Block(ctx, timeouts.Opts{ - Create: true, - Update: true, - Delete: true, - }), }, } } @@ -219,6 +224,7 @@ func (r *dataSourceResource) Create(ctx context.Context, request resource.Create if response.Diagnostics.HasError() { return } + input.ClientToken = aws.String(id.UniqueId()) outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { @@ -231,31 +237,15 @@ func (r *dataSourceResource) Create(ctx context.Context, request resource.Create return } - ds := outputRaw.(*bedrockagent.CreateDataSourceOutput).DataSource - data.DataSourceID = fwflex.StringToFramework(ctx, ds.DataSourceId) - data.KnowledgeBaseID = fwflex.StringToFramework(ctx, ds.KnowledgeBaseId) + data.DataSourceID = fwflex.StringToFramework(ctx, outputRaw.(*bedrockagent.CreateDataSourceOutput).DataSource.DataSourceId) data.setID() - parts, err := flex.ExpandResourceId(data.ID.ValueString(), dataSourceResourceIdPartCount, false) - if err != nil { - response.Diagnostics.AddError("Creating Bedrock Agent Data Source", err.Error()) - - return - } - - ds, err = waitDataSourceCreated(ctx, conn, parts[0], parts[1], r.CreateTimeout(ctx, data.Timeouts)) - - if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) create", data.DataSourceID.ValueString()), err.Error()) + if _, err := waitDataSourceCreated(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) create", data.ID.ValueString()), err.Error()) return } - // Set values for unknowns after creation is complete. 
- data.CreatedAt = fwflex.TimeToFramework(ctx, ds.CreatedAt) - data.FailureReasons = fwflex.FlattenFrameworkStringValueListOfString(ctx, ds.FailureReasons) - data.UpdatedAt = fwflex.TimeToFramework(ctx, ds.UpdatedAt) - response.Diagnostics.Append(response.State.Set(ctx, data)...) } @@ -266,16 +256,15 @@ func (r *dataSourceResource) Read(ctx context.Context, request resource.ReadRequ return } - conn := r.Meta().BedrockAgentClient(ctx) - - parts, err := flex.ExpandResourceId(data.ID.ValueString(), dataSourceResourceIdPartCount, false) - if err != nil { - response.Diagnostics.AddError("Reading Bedrock Agent Data Source", err.Error()) + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) return } - ds, err := findDataSourceByID(ctx, conn, parts[0], parts[1]) + conn := r.Meta().BedrockAgentClient(ctx) + + ds, err := findDataSourceByTwoPartKey(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString()) if tfresource.NotFound(err) { response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) @@ -285,7 +274,7 @@ func (r *dataSourceResource) Read(ctx context.Context, request resource.ReadRequ } if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("reading Bedrock Agent Data Source (%s)", data.DataSourceID.ValueString()), err.Error()) + response.Diagnostics.AddError(fmt.Sprintf("reading Bedrock Agent Data Source (%s)", data.ID.ValueString()), err.Error()) return } @@ -311,49 +300,20 @@ func (r *dataSourceResource) Update(ctx context.Context, request resource.Update conn := r.Meta().BedrockAgentClient(ctx) - if !new.Description.Equal(old.Description) || - !new.DataDeletionPolicy.Equal(old.DataDeletionPolicy) || - !new.Name.Equal(old.Name) || - !new.DataSourceConfiguration.Equal(old.DataSourceConfiguration) || - !new.DataDeletionPolicy.Equal(old.DataDeletionPolicy) || - !new.ServerSideEncryptionConfiguration.Equal(old.ServerSideEncryptionConfiguration) || - 
!new.VectorIngestionConfiguration.Equal(old.VectorIngestionConfiguration) { - input := &bedrockagent.UpdateDataSourceInput{} - response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...) - if response.Diagnostics.HasError() { - return - } - - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { - return conn.UpdateDataSource(ctx, input) - }, errCodeValidationException, "cannot assume role") - - if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("updating Bedrock Agent Data Source (%s)", new.DataSourceID.ValueString()), err.Error()) - - return - } - - parts, err := flex.ExpandResourceId(new.ID.ValueString(), dataSourceResourceIdPartCount, false) - if err != nil { - response.Diagnostics.AddError("Updating Bedrock Agent Data Source", err.Error()) - - return - } - - ds, err := waitDataSourceUpdated(ctx, conn, parts[0], parts[1], r.UpdateTimeout(ctx, new.Timeouts)) + input := &bedrockagent.UpdateDataSourceInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...) + if response.Diagnostics.HasError() { + return + } - if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) update", new.DataSourceID.ValueString()), err.Error()) + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { + return conn.UpdateDataSource(ctx, input) + }, errCodeValidationException, "cannot assume role") - return - } + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating Bedrock Agent Data Source (%s)", new.DataSourceID.ValueString()), err.Error()) - new.FailureReasons = fwflex.FlattenFrameworkStringValueListOfString(ctx, ds.FailureReasons) - new.UpdatedAt = fwflex.TimeToFramework(ctx, ds.UpdatedAt) - } else { - new.FailureReasons = old.FailureReasons - new.UpdatedAt = old.UpdatedAt + return } response.Diagnostics.Append(response.State.Set(ctx, &new)...) 
@@ -369,8 +329,8 @@ func (r *dataSourceResource) Delete(ctx context.Context, request resource.Delete conn := r.Meta().BedrockAgentClient(ctx) _, err := conn.DeleteDataSource(ctx, &bedrockagent.DeleteDataSourceInput{ - KnowledgeBaseId: aws.String(data.KnowledgeBaseID.ValueString()), DataSourceId: aws.String(data.DataSourceID.ValueString()), + KnowledgeBaseId: aws.String(data.KnowledgeBaseID.ValueString()), }) if errs.IsA[*awstypes.ResourceNotFoundException](err) { @@ -378,86 +338,48 @@ func (r *dataSourceResource) Delete(ctx context.Context, request resource.Delete } if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("deleting Bedrock Agent Data Source (%s)", data.DataSourceID.ValueString()), err.Error()) + response.Diagnostics.AddError(fmt.Sprintf("deleting Bedrock Agent Data Source (%s)", data.ID.ValueString()), err.Error()) return } - parts, err := flex.ExpandResourceId(data.ID.ValueString(), dataSourceResourceIdPartCount, false) - if err != nil { - response.Diagnostics.AddError("Deleting Bedrock Agent Data Source", err.Error()) + if _, err := waitDataSourceDeleted(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) delete", data.ID.ValueString()), err.Error()) return } - - _, err = waitDataSourceDeleted(ctx, conn, parts[0], parts[1], r.DeleteTimeout(ctx, data.Timeouts)) - - if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Knowledge Base (%s) delete", data.KnowledgeBaseID.ValueString()), err.Error()) - - return - } -} - -func (r *dataSourceResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } -func waitDataSourceCreated(ctx context.Context, conn *bedrockagent.Client, id, kbId string, timeout time.Duration) 
(*awstypes.DataSource, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{}, - Target: enum.Slice(awstypes.DataSourceStatusAvailable), - Refresh: statusDataSource(ctx, conn, id, kbId), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 2, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*awstypes.DataSource); ok { - return out, err +func findDataSourceByTwoPartKey(ctx context.Context, conn *bedrockagent.Client, dataSourceID, knowledgeBaseID string) (*awstypes.DataSource, error) { + input := &bedrockagent.GetDataSourceInput{ + DataSourceId: aws.String(dataSourceID), + KnowledgeBaseId: aws.String(knowledgeBaseID), } - return nil, err -} - -func waitDataSourceUpdated(ctx context.Context, conn *bedrockagent.Client, id, kbId string, timeout time.Duration) (*awstypes.DataSource, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.DataSourceStatusAvailable), - Target: enum.Slice(awstypes.DataSourceStatusAvailable), - Refresh: statusDataSource(ctx, conn, id, kbId), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 2, - } + output, err := conn.GetDataSource(ctx, input) - outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*awstypes.DataSource); ok { - return out, err + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } } - return nil, err -} - -func waitDataSourceDeleted(ctx context.Context, conn *bedrockagent.Client, id, kbId string, timeout time.Duration) (*awstypes.DataSource, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.DataSourceStatusDeleting), - Target: []string{}, - Refresh: statusDataSource(ctx, conn, id, kbId), - Timeout: timeout, + if err != nil { + return nil, err } - outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*awstypes.DataSource); ok { - return out, err + if 
output == nil || output.DataSource == nil { + return nil, tfresource.NewEmptyResultError(input) } - return nil, err + return output.DataSource, nil } -func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id, kbId string) retry.StateRefreshFunc { +func statusDataSource(ctx context.Context, conn *bedrockagent.Client, dataSourceID, knowledgeBaseID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := findDataSourceByID(ctx, conn, id, kbId) + output, err := findDataSourceByTwoPartKey(ctx, conn, dataSourceID, knowledgeBaseID) + if tfresource.NotFound(err) { return nil, "", nil } @@ -470,68 +392,90 @@ func statusDataSource(ctx context.Context, conn *bedrockagent.Client, id, kbId s } } -func findDataSourceByID(ctx context.Context, conn *bedrockagent.Client, id, kbId string) (*awstypes.DataSource, error) { - input := &bedrockagent.GetDataSourceInput{ - DataSourceId: aws.String(id), - KnowledgeBaseId: aws.String(kbId), +func waitDataSourceCreated(ctx context.Context, conn *bedrockagent.Client, dataSourceID, knowledgeBaseID string, timeout time.Duration) (*awstypes.DataSource, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{}, + Target: enum.Slice(awstypes.DataSourceStatusAvailable), + Refresh: statusDataSource(ctx, conn, dataSourceID, knowledgeBaseID), + Timeout: timeout, } - out, err := conn.GetDataSource(ctx, input) - if err != nil { - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } + outputRaw, err := stateConf.WaitForStateContext(ctx) - return nil, err + if output, ok := outputRaw.(*awstypes.DataSource); ok { + tfresource.SetLastError(err, errors.Join(tfslices.ApplyToAll(output.FailureReasons, errors.New)...)) + + return output, err } - if out == nil || out.DataSource == nil { - return nil, tfresource.NewEmptyResultError(input) + return nil, err +} + +func waitDataSourceDeleted(ctx context.Context, conn 
*bedrockagent.Client, dataSourceID, knowledgeBaseID string, timeout time.Duration) (*awstypes.DataSource, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DataSourceStatusDeleting), + Target: []string{}, + Refresh: statusDataSource(ctx, conn, dataSourceID, knowledgeBaseID), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.DataSource); ok { + tfresource.SetLastError(err, errors.Join(tfslices.ApplyToAll(output.FailureReasons, errors.New)...)) + + return output, err } - return out.DataSource, nil + return nil, err } type dataSourceResourceModel struct { - CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` - DataDeletionPolicy types.String `tfsdk:"data_deletion_policy"` + DataDeletionPolicy fwtypes.StringEnum[awstypes.DataDeletionPolicy] `tfsdk:"data_deletion_policy"` DataSourceConfiguration fwtypes.ListNestedObjectValueOf[dataSourceConfigurationModel] `tfsdk:"data_source_configuration"` DataSourceID types.String `tfsdk:"data_source_id"` - ID types.String `tfsdk:"id"` Description types.String `tfsdk:"description"` - FailureReasons fwtypes.ListValueOf[types.String] `tfsdk:"failure_reasons"` + ID types.String `tfsdk:"id"` KnowledgeBaseID types.String `tfsdk:"knowledge_base_id"` Name types.String `tfsdk:"name"` ServerSideEncryptionConfiguration fwtypes.ListNestedObjectValueOf[serverSideEncryptionConfigurationModel] `tfsdk:"server_side_encryption_configuration"` - VectorIngestionConfiguration fwtypes.ListNestedObjectValueOf[vectorIngestionConfigurationModel] `tfsdk:"vector_ingestion_configuration"` Timeouts timeouts.Value `tfsdk:"timeouts"` - UpdatedAt timetypes.RFC3339 `tfsdk:"updated_at"` + VectorIngestionConfiguration fwtypes.ListNestedObjectValueOf[vectorIngestionConfigurationModel] `tfsdk:"vector_ingestion_configuration"` } const ( - dataSourceResourceIdPartCount = 2 + dataSourceResourceIDPartCount = 2 ) +func (m *dataSourceResourceModel) InitFromID() error { + parts, 
err := flex.ExpandResourceId(m.ID.ValueString(), dataSourceResourceIDPartCount, false) + if err != nil { + return err + } + + m.DataSourceID = types.StringValue(parts[0]) + m.KnowledgeBaseID = types.StringValue(parts[1]) + + return nil +} + func (data *dataSourceResourceModel) setID() { - data.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString()}, dataSourceResourceIdPartCount, false))) + data.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString()}, dataSourceResourceIDPartCount, false))) } type dataSourceConfigurationModel struct { - Type types.String `tfsdk:"type"` - S3Configuration fwtypes.ListNestedObjectValueOf[s3ConfigurationModel] `tfsdk:"s3_configuration"` + Type fwtypes.StringEnum[awstypes.DataSourceType] `tfsdk:"type"` + S3Configuration fwtypes.ListNestedObjectValueOf[s3DataSourceConfigurationModel] `tfsdk:"s3_configuration"` } -type s3ConfigurationModel struct { +type s3DataSourceConfigurationModel struct { BucketARN fwtypes.ARN `tfsdk:"bucket_arn"` - BucketOwnerAccountId types.String `tfsdk:"bucket_owner_account_id"` + BucketOwnerAccountID types.String `tfsdk:"bucket_owner_account_id"` InclusionPrefixes fwtypes.SetValueOf[types.String] `tfsdk:"inclusion_prefixes"` } type serverSideEncryptionConfigurationModel struct { - KmsKeyArn types.String `tfsdk:"kms_key_arn"` + KMSKeyARN fwtypes.ARN `tfsdk:"kms_key_arn"` } type vectorIngestionConfigurationModel struct { @@ -539,7 +483,7 @@ type vectorIngestionConfigurationModel struct { } type chunkingConfigurationModel struct { - ChunkingStrategy types.String `tfsdk:"chunking_strategy"` + ChunkingStrategy fwtypes.StringEnum[awstypes.ChunkingStrategy] `tfsdk:"chunking_strategy"` FixedSizeChunkingConfiguration fwtypes.ListNestedObjectValueOf[fixedSizeChunkingConfigurationModel] `tfsdk:"fixed_size_chunking_configuration"` } diff --git 
a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index 363be4edfbe..f66141c473d 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -6,7 +6,6 @@ package bedrockagent_test import ( "context" "fmt" - "strings" "testing" "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" @@ -37,7 +36,7 @@ func testAccDataSource_basic(t *testing.T) { { Config: testAccDataSourceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceBaseExists(ctx, resourceName, &dataSource), + testAccCheckDataSourceExists(ctx, resourceName, &dataSource), ), }, { @@ -66,7 +65,7 @@ func testAccDataSource_full(t *testing.T) { { Config: testAccDataSourceConfig_full(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceBaseExists(ctx, resourceName, &dataSource), + testAccCheckDataSourceExists(ctx, resourceName, &dataSource), ), }, { @@ -95,7 +94,7 @@ func testAccDataSource_disappears(t *testing.T) { { Config: testAccDataSourceConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckDataSourceBaseExists(ctx, resourceName, &dataSource), + testAccCheckDataSourceExists(ctx, resourceName, &dataSource), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfbedrockagent.ResourceDataSource, resourceName), ), ExpectNonEmptyPlan: true, @@ -112,8 +111,8 @@ func testAccCheckDataSourceDestroy(ctx context.Context) resource.TestCheckFunc { if rs.Type != "aws_bedrockagent_data_source" { continue } - parts := strings.Split(rs.Primary.ID, ",") - _, err := tfbedrockagent.FindDataSourceByID(ctx, conn, parts[0], parts[1]) + + _, err := tfbedrockagent.FindDataSourceByTwoPartKey(ctx, conn, rs.Primary.Attributes["data_source_id"], rs.Primary.Attributes["knowledge_base_id"]) if tfresource.NotFound(err) { continue @@ -130,7 +129,7 @@ func testAccCheckDataSourceDestroy(ctx context.Context) resource.TestCheckFunc { } } -func 
testAccCheckDataSourceBaseExists(ctx context.Context, n string, v *types.DataSource) resource.TestCheckFunc { +func testAccCheckDataSourceExists(ctx context.Context, n string, v *types.DataSource) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -139,8 +138,7 @@ func testAccCheckDataSourceBaseExists(ctx context.Context, n string, v *types.Da conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentClient(ctx) - parts := strings.Split(rs.Primary.ID, ",") - output, err := tfbedrockagent.FindDataSourceByID(ctx, conn, parts[0], parts[1]) + output, err := tfbedrockagent.FindDataSourceByTwoPartKey(ctx, conn, rs.Primary.Attributes["data_source_id"], rs.Primary.Attributes["knowledge_base_id"]) if err != nil { return err diff --git a/internal/service/bedrockagent/exports_test.go b/internal/service/bedrockagent/exports_test.go index 9357e7e5491..142b8c8dcff 100644 --- a/internal/service/bedrockagent/exports_test.go +++ b/internal/service/bedrockagent/exports_test.go @@ -16,6 +16,6 @@ var ( FindAgentActionGroupByThreePartKey = findAgentActionGroupByThreePartKey FindAgentAliasByTwoPartKey = findAgentAliasByTwoPartKey FindAgentKnowledgeBaseAssociationByThreePartID = findAgentKnowledgeBaseAssociationByThreePartKey - FindDataSourceByID = findDataSourceByID + FindDataSourceByTwoPartKey = findDataSourceByTwoPartKey FindKnowledgeBaseByID = findKnowledgeBaseByID ) diff --git a/website/docs/r/bedrockagent_data_source.html.markdown b/website/docs/r/bedrockagent_data_source.html.markdown index a27cea72812..f86af07d736 100644 --- a/website/docs/r/bedrockagent_data_source.html.markdown +++ b/website/docs/r/bedrockagent_data_source.html.markdown @@ -5,14 +5,7 @@ page_title: "AWS: aws_bedrockagent_data_source" description: |- Terraform resource for managing an AWS Agents for Amazon Bedrock Data Source. 
--- -` + # Resource: aws_bedrockagent_data_source Terraform resource for managing an AWS Agents for Amazon Bedrock Data Source. @@ -47,9 +40,8 @@ This resource exports the following attributes in addition to the arguments abov [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): -* `create` - (Default `60m`) -* `update` - (Default `180m`) -* `delete` - (Default `90m`) +* `create` - (Default `30m`) +* `delete` - (Default `30m`) ## Import From f30b88e3bc5725d2cf941ab0b464b318e847926f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 2 May 2024 15:18:00 -0400 Subject: [PATCH 08/15] r/aws_bedrockagent_data_source: Tidy up acceptance test configurations. --- .../service/bedrockagent/data_source_test.go | 77 +++++++++++++++---- 1 file changed, 62 insertions(+), 15 deletions(-) diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index f66141c473d..359a6611c12 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -19,11 +19,23 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) +// Prerequisites: +// * psql run via null_resource/provisioner "local-exec" +// * jq for parsing output from aws cli to retrieve postgres password func testAccDataSource_basic(t *testing.T) { + acctest.SkipIfExeNotOnPath(t, "psql") + acctest.SkipIfExeNotOnPath(t, "jq") + acctest.SkipIfExeNotOnPath(t, "aws") + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var dataSource types.DataSource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrockagent_data_source.test" + foundationModel := "amazon.titan-embed-text-v1" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -34,7 +46,7 @@ func testAccDataSource_basic(t *testing.T) { CheckDestroy: testAccCheckDataSourceDestroy(ctx), Steps: 
[]resource.TestStep{ { - Config: testAccDataSourceConfig_basic(rName), + Config: testAccDataSourceConfig_basic(rName, foundationModel), Check: resource.ComposeTestCheckFunc( testAccCheckDataSourceExists(ctx, resourceName, &dataSource), ), @@ -48,11 +60,23 @@ func testAccDataSource_basic(t *testing.T) { }) } +// Prerequisites: +// * psql run via null_resource/provisioner "local-exec" +// * jq for parsing output from aws cli to retrieve postgres password func testAccDataSource_full(t *testing.T) { + acctest.SkipIfExeNotOnPath(t, "psql") + acctest.SkipIfExeNotOnPath(t, "jq") + acctest.SkipIfExeNotOnPath(t, "aws") + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var dataSource types.DataSource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrockagent_data_source.test" + foundationModel := "amazon.titan-embed-text-v1" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -63,7 +87,7 @@ func testAccDataSource_full(t *testing.T) { CheckDestroy: testAccCheckDataSourceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataSourceConfig_full(rName), + Config: testAccDataSourceConfig_full(rName, foundationModel), Check: resource.ComposeTestCheckFunc( testAccCheckDataSourceExists(ctx, resourceName, &dataSource), ), @@ -77,11 +101,23 @@ func testAccDataSource_full(t *testing.T) { }) } +// Prerequisites: +// * psql run via null_resource/provisioner "local-exec" +// * jq for parsing output from aws cli to retrieve postgres password func testAccDataSource_disappears(t *testing.T) { + acctest.SkipIfExeNotOnPath(t, "psql") + acctest.SkipIfExeNotOnPath(t, "jq") + acctest.SkipIfExeNotOnPath(t, "aws") + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + var dataSource types.DataSource rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrockagent_data_source.test" + foundationModel := 
"amazon.titan-embed-text-v1" resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -92,7 +128,7 @@ func testAccDataSource_disappears(t *testing.T) { CheckDestroy: testAccCheckDataSourceDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccDataSourceConfig_basic(rName), + Config: testAccDataSourceConfig_basic(rName, foundationModel), Check: resource.ComposeTestCheckFunc( testAccCheckDataSourceExists(ctx, resourceName, &dataSource), acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfbedrockagent.ResourceDataSource, resourceName), @@ -150,39 +186,50 @@ func testAccCheckDataSourceExists(ctx context.Context, n string, v *types.DataSo } } -func testAccDataSourceConfig_basic(rName string) string { - return fmt.Sprintf(` +func testAccDataSourceConfig_base(rName, embeddingModel string) string { + return acctest.ConfigCompose(testAccKnowledgeBaseConfig_basicRDS(rName, embeddingModel), fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} +`, rName)) +} + +func testAccDataSourceConfig_basic(rName, embeddingModel string) string { + return acctest.ConfigCompose(testAccDataSourceConfig_base(rName, embeddingModel), fmt.Sprintf(` resource "aws_bedrockagent_data_source" "test" { - name = %[1]q - knowledge_base_id = "xxxx" - data_deletion_policy = "RETAIN" + name = %[1]q + knowledge_base_id = aws_bedrockagent_knowledge_base.test.id data_source_configuration { type = "S3" + s3_configuration { - bucket_arn = "arn:aws:xxxx" + bucket_arn = aws_s3_bucket.test.arn } } } -`, rName) +`, rName)) } -func testAccDataSourceConfig_full(rName string) string { - return fmt.Sprintf(` +func testAccDataSourceConfig_full(rName, embeddingModel string) string { + return acctest.ConfigCompose(testAccDataSourceConfig_base(rName, embeddingModel), fmt.Sprintf(` resource "aws_bedrockagent_data_source" "test" { name = %[1]q - knowledge_base_id = "xxx" + knowledge_base_id = aws_bedrockagent_knowledge_base.test.id data_deletion_policy = "RETAIN" data_source_configuration 
{ type = "S3" + s3_configuration { - bucket_arn = "arn:aws:s3:::xxxxx" + bucket_arn = aws_s3_bucket.test.arn } } + vector_ingestion_configuration { chunking_configuration { chunking_strategy = "FIXED_SIZE" + fixed_size_chunking_configuration { max_tokens = 3 overlap_percentage = 80 @@ -190,5 +237,5 @@ resource "aws_bedrockagent_data_source" "test" { } } } -`, rName) +`, rName)) } From ba928a90e79f21806ba4bba86c2a6af3a2d38b0f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 2 May 2024 15:18:48 -0400 Subject: [PATCH 09/15] Update 37158.txt --- .changelog/37158.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.changelog/37158.txt b/.changelog/37158.txt index 5c1969082a1..ac116376917 100644 --- a/.changelog/37158.txt +++ b/.changelog/37158.txt @@ -1,3 +1,3 @@ -``release-note:new-resource +```release-note:new-resource aws_bedrockagent_data_source -``` \ No newline at end of file +``` From bc99980a95a85c4fc982e124d69c40b1676a5ba4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 2 May 2024 15:31:33 -0400 Subject: [PATCH 10/15] r/aws_bedrockagent_data_source: Add 'ExternalProviders' to acceptance tests. 
--- .../service/bedrockagent/data_source_test.go | 24 ++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index 359a6611c12..08c58370bf0 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -43,7 +43,13 @@ func testAccDataSource_basic(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDataSourceDestroy(ctx), + ExternalProviders: map[string]resource.ExternalProvider{ + "null": { + Source: "hashicorp/null", + VersionConstraint: "3.2.2", + }, + }, + CheckDestroy: testAccCheckDataSourceDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccDataSourceConfig_basic(rName, foundationModel), @@ -84,7 +90,13 @@ func testAccDataSource_full(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDataSourceDestroy(ctx), + ExternalProviders: map[string]resource.ExternalProvider{ + "null": { + Source: "hashicorp/null", + VersionConstraint: "3.2.2", + }, + }, + CheckDestroy: testAccCheckDataSourceDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccDataSourceConfig_full(rName, foundationModel), @@ -125,7 +137,13 @@ func testAccDataSource_disappears(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckDataSourceDestroy(ctx), + ExternalProviders: map[string]resource.ExternalProvider{ + "null": { + Source: "hashicorp/null", + VersionConstraint: "3.2.2", + }, + }, + CheckDestroy: testAccCheckDataSourceDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccDataSourceConfig_basic(rName, foundationModel), From 
a54db8f4a15c2bc7bca84e3f947264f879b20a0b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 2 May 2024 16:43:42 -0400 Subject: [PATCH 11/15] Fix golangci-lint 'stylecheck'. --- internal/service/bedrockagent/data_source.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index 26b8b08a7f8..6b2b751ca96 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -459,8 +459,8 @@ func (m *dataSourceResourceModel) InitFromID() error { return nil } -func (data *dataSourceResourceModel) setID() { - data.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString()}, dataSourceResourceIDPartCount, false))) +func (m *dataSourceResourceModel) setID() { + m.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{m.DataSourceID.ValueString(), m.KnowledgeBaseID.ValueString()}, dataSourceResourceIDPartCount, false))) } type dataSourceConfigurationModel struct { From 13b6988738f7a17ea4404f1954684752d3d00d13 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 2 May 2024 16:48:03 -0400 Subject: [PATCH 12/15] r/aws_bedrockagent_data_source: Set 'data_deletion_policy' after Create. 
--- internal/service/bedrockagent/data_source.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index 6b2b751ca96..13577117435 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -240,12 +240,16 @@ func (r *dataSourceResource) Create(ctx context.Context, request resource.Create data.DataSourceID = fwflex.StringToFramework(ctx, outputRaw.(*bedrockagent.CreateDataSourceOutput).DataSource.DataSourceId) data.setID() - if _, err := waitDataSourceCreated(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)); err != nil { + ds, err := waitDataSourceCreated(ctx, conn, data.DataSourceID.ValueString(), data.KnowledgeBaseID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { response.Diagnostics.AddError(fmt.Sprintf("waiting for Bedrock Agent Data Source (%s) create", data.ID.ValueString()), err.Error()) return } + data.DataDeletionPolicy = fwtypes.StringEnumValue(ds.DataDeletionPolicy) + response.Diagnostics.Append(response.State.Set(ctx, data)...) } From 6f67a45b86b238f2e5e50dc78059fd49784f0ff9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 3 May 2024 08:06:55 -0400 Subject: [PATCH 13/15] Add 'testAccDataSource_update'. 
--- .../service/bedrockagent/bedrockagent_test.go | 1 + internal/service/bedrockagent/data_source.go | 4 - .../service/bedrockagent/data_source_test.go | 88 ++++++++++++++++++- 3 files changed, 88 insertions(+), 5 deletions(-) diff --git a/internal/service/bedrockagent/bedrockagent_test.go b/internal/service/bedrockagent/bedrockagent_test.go index ac69a9db24e..4e8142156d4 100644 --- a/internal/service/bedrockagent/bedrockagent_test.go +++ b/internal/service/bedrockagent/bedrockagent_test.go @@ -24,6 +24,7 @@ func TestAccBedrockAgent_serial(t *testing.T) { "basic": testAccDataSource_basic, "disappears": testAccDataSource_disappears, "full": testAccDataSource_full, + "update": testAccDataSource_update, }, } diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index 13577117435..cdb2a4c3943 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -48,10 +48,6 @@ func newDataSourceResource(_ context.Context) (resource.ResourceWithConfigure, e return r, nil } -const ( - ResNameDataSource = "Data Source" -) - type dataSourceResource struct { framework.ResourceWithConfigure framework.WithImportByID diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index 08c58370bf0..0dc8e3497a7 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -157,6 +157,90 @@ func testAccDataSource_disappears(t *testing.T) { }) } +// Prerequisites: +// * psql run via null_resource/provisioner "local-exec" +// * jq for parsing output from aws cli to retrieve postgres password +func testAccDataSource_update(t *testing.T) { + acctest.SkipIfExeNotOnPath(t, "psql") + acctest.SkipIfExeNotOnPath(t, "jq") + acctest.SkipIfExeNotOnPath(t, "aws") + + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dataSource 
types.DataSource + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_bedrockagent_data_source.test" + foundationModel := "amazon.titan-embed-text-v1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.BedrockAgentServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ExternalProviders: map[string]resource.ExternalProvider{ + "null": { + Source: "hashicorp/null", + VersionConstraint: "3.2.2", + }, + }, + CheckDestroy: testAccCheckDataSourceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceConfig_basic(rName, foundationModel), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDataSourceExists(ctx, resourceName, &dataSource), + resource.TestCheckResourceAttrSet(resourceName, "data_deletion_policy"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "data_source_configuration.0.s3_configuration.0.bucket_arn"), + resource.TestCheckNoResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.bucket_owner_account_id"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.#", "0"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.type", "S3"), + resource.TestCheckResourceAttrSet(resourceName, "data_source_id"), + resource.TestCheckNoResourceAttr(resourceName, "description"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.#", "0"), + ), + }, + { + Config: testAccDataSourceConfig_full(rName, foundationModel), + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckDataSourceExists(ctx, resourceName, &dataSource), + resource.TestCheckResourceAttr(resourceName, "data_deletion_policy", "RETAIN"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "data_source_configuration.0.s3_configuration.0.bucket_arn"), + resource.TestCheckNoResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.bucket_owner_account_id"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.#", "2"), + resource.TestCheckTypeSetElemAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.*", "Europe/France/Nouvelle-Aquitaine/Bordeaux"), + resource.TestCheckTypeSetElemAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.*", "North America/USA/Washington/Seattle"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.type", "S3"), + resource.TestCheckResourceAttrSet(resourceName, "data_source_id"), + resource.TestCheckResourceAttr(resourceName, "description", "testing"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.chunking_strategy", "FIXED_SIZE"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.0.max_tokens", "3"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.0.overlap_percentage", "80"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckDataSourceDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).BedrockAgentClient(ctx) @@ -235,12 +319,14 @@ resource "aws_bedrockagent_data_source" "test" { name = %[1]q knowledge_base_id = aws_bedrockagent_knowledge_base.test.id data_deletion_policy = "RETAIN" + description = "testing" data_source_configuration { type = "S3" s3_configuration { - bucket_arn = aws_s3_bucket.test.arn + bucket_arn = aws_s3_bucket.test.arn + inclusion_prefixes = ["Europe/France/Nouvelle-Aquitaine/Bordeaux", "North America/USA/Washington/Seattle"] } } From 22c006b0fb108de9966a86865063db99588f1c67 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 3 May 2024 11:27:14 -0400 Subject: [PATCH 14/15] r/aws_bedrockagent_data_source: Fixes. 
--- internal/service/bedrockagent/data_source.go | 8 ++++++++ internal/service/bedrockagent/data_source_test.go | 5 ++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index cdb2a4c3943..87178caad64 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -71,6 +72,9 @@ func (r *dataSourceResource) Schema(ctx context.Context, request resource.Schema }, "data_source_id": schema.StringAttribute{ Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, "description": schema.StringAttribute{ Optional: true, @@ -129,6 +133,10 @@ func (r *dataSourceResource) Schema(ctx context.Context, request resource.Schema CustomType: fwtypes.SetOfStringType, ElementType: types.StringType, Optional: true, + Validators: []validator.Set{ + setvalidator.SizeAtMost(1), + setvalidator.ValueStringsAre(stringvalidator.LengthBetween(1, 300)), + }, }, }, }, diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index 0dc8e3497a7..3cf18c4dc4a 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -216,9 +216,8 @@ func testAccDataSource_update(t *testing.T) { resource.TestCheckResourceAttr(resourceName, 
"data_source_configuration.0.s3_configuration.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "data_source_configuration.0.s3_configuration.0.bucket_arn"), resource.TestCheckNoResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.bucket_owner_account_id"), - resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.#", "2"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.*", "Europe/France/Nouvelle-Aquitaine/Bordeaux"), - resource.TestCheckTypeSetElemAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.*", "North America/USA/Washington/Seattle"), resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.type", "S3"), resource.TestCheckResourceAttrSet(resourceName, "data_source_id"), resource.TestCheckResourceAttr(resourceName, "description", "testing"), @@ -326,7 +325,7 @@ resource "aws_bedrockagent_data_source" "test" { s3_configuration { bucket_arn = aws_s3_bucket.test.arn - inclusion_prefixes = ["Europe/France/Nouvelle-Aquitaine/Bordeaux", "North America/USA/Washington/Seattle"] + inclusion_prefixes = ["Europe/France/Nouvelle-Aquitaine/Bordeaux"] } } From c8d2cae642403dba94e131b7a379c401b311a981 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 3 May 2024 12:14:52 -0400 Subject: [PATCH 15/15] r/aws_bedrockagent_data_source: 'vector_ingestion_configuration' is ForceNew. 
--- internal/service/bedrockagent/data_source.go | 20 ++++++++ .../service/bedrockagent/data_source_test.go | 49 ++++++++++++++++--- 2 files changed, 61 insertions(+), 8 deletions(-) diff --git a/internal/service/bedrockagent/data_source.go b/internal/service/bedrockagent/data_source.go index 87178caad64..356bbfcb8c5 100644 --- a/internal/service/bedrockagent/data_source.go +++ b/internal/service/bedrockagent/data_source.go @@ -20,6 +20,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -164,6 +166,9 @@ func (r *dataSourceResource) Schema(ctx context.Context, request resource.Schema }), "vector_ingestion_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[vectorIngestionConfigurationModel](ctx), + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -171,6 +176,9 @@ func (r *dataSourceResource) Schema(ctx context.Context, request resource.Schema Blocks: map[string]schema.Block{ "chunking_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[chunkingConfigurationModel](ctx), + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -179,11 +187,17 @@ func (r *dataSourceResource) Schema(ctx context.Context, request resource.Schema "chunking_strategy": 
schema.StringAttribute{ CustomType: fwtypes.StringEnumType[awstypes.ChunkingStrategy](), Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, }, }, Blocks: map[string]schema.Block{ "fixed_size_chunking_configuration": schema.ListNestedBlock{ CustomType: fwtypes.NewListNestedObjectTypeOf[fixedSizeChunkingConfigurationModel](ctx), + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, Validators: []validator.List{ listvalidator.SizeAtMost(1), }, @@ -191,12 +205,18 @@ func (r *dataSourceResource) Schema(ctx context.Context, request resource.Schema Attributes: map[string]schema.Attribute{ "max_tokens": schema.Int64Attribute{ Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.RequiresReplace(), + }, Validators: []validator.Int64{ int64validator.AtLeast(1), }, }, "overlap_percentage": schema.Int64Attribute{ Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.RequiresReplace(), + }, Validators: []validator.Int64{ int64validator.Between(1, 99), }, diff --git a/internal/service/bedrockagent/data_source_test.go b/internal/service/bedrockagent/data_source_test.go index 3cf18c4dc4a..2a09c5f19dd 100644 --- a/internal/service/bedrockagent/data_source_test.go +++ b/internal/service/bedrockagent/data_source_test.go @@ -100,8 +100,26 @@ func testAccDataSource_full(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDataSourceConfig_full(rName, foundationModel), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckDataSourceExists(ctx, resourceName, &dataSource), + resource.TestCheckResourceAttr(resourceName, "data_deletion_policy", "RETAIN"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, 
"data_source_configuration.0.s3_configuration.0.bucket_arn"), + resource.TestCheckNoResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.bucket_owner_account_id"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "data_source_configuration.0.s3_configuration.0.inclusion_prefixes.*", "Europe/France/Nouvelle-Aquitaine/Bordeaux"), + resource.TestCheckResourceAttr(resourceName, "data_source_configuration.0.type", "S3"), + resource.TestCheckResourceAttrSet(resourceName, "data_source_id"), + resource.TestCheckResourceAttr(resourceName, "description", "testing"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.chunking_strategy", "FIXED_SIZE"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.0.max_tokens", "3"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.0.overlap_percentage", "80"), ), }, { @@ -208,7 +226,7 @@ func testAccDataSource_update(t *testing.T) { ), }, { - Config: testAccDataSourceConfig_full(rName, foundationModel), + Config: testAccDataSourceConfig_updated(rName, foundationModel), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckDataSourceExists(ctx, 
resourceName, &dataSource), resource.TestCheckResourceAttr(resourceName, "data_deletion_policy", "RETAIN"), @@ -223,12 +241,7 @@ func testAccDataSource_update(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "description", "testing"), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "0"), - resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.chunking_strategy", "FIXED_SIZE"), - resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.0.max_tokens", "3"), - resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.0.chunking_configuration.0.fixed_size_chunking_configuration.0.overlap_percentage", "80"), + resource.TestCheckResourceAttr(resourceName, "vector_ingestion_configuration.#", "0"), ), }, { @@ -342,3 +355,23 @@ resource "aws_bedrockagent_data_source" "test" { } `, rName)) } + +func testAccDataSourceConfig_updated(rName, embeddingModel string) string { + return acctest.ConfigCompose(testAccDataSourceConfig_base(rName, embeddingModel), fmt.Sprintf(` +resource "aws_bedrockagent_data_source" "test" { + name = %[1]q + knowledge_base_id = aws_bedrockagent_knowledge_base.test.id + data_deletion_policy = "RETAIN" + description = "testing" + + data_source_configuration { + type = "S3" + + s3_configuration { + bucket_arn = aws_s3_bucket.test.arn + inclusion_prefixes = ["Europe/France/Nouvelle-Aquitaine/Bordeaux"] + } + } +} +`, rName)) +}