From 0fc9a8404302237f7ad3943827ae3c28ae9eeb9b Mon Sep 17 00:00:00 2001 From: Anthony Wat Date: Sun, 7 Jul 2024 02:36:58 -0400 Subject: [PATCH 01/16] feat: Add tags support to aws_mskconnect resources and data sources --- .changelog/38270.txt | 27 +++ internal/service/kafkaconnect/connector.go | 35 ++-- .../kafkaconnect/connector_data_source.go | 3 + .../connector_data_source_test.go | 1 + .../service/kafkaconnect/connector_test.go | 174 +++++++++++++++++- .../service/kafkaconnect/custom_plugin.go | 16 ++ .../kafkaconnect/custom_plugin_data_source.go | 3 + .../custom_plugin_data_source_test.go | 5 + .../kafkaconnect/custom_plugin_test.go | 86 +++++++++ internal/service/kafkaconnect/generate.go | 1 + .../kafkaconnect/service_package_gen.go | 18 ++ internal/service/kafkaconnect/status.go | 16 ++ internal/service/kafkaconnect/tags_gen.go | 129 +++++++++++++ internal/service/kafkaconnect/wait.go | 17 ++ .../kafkaconnect/worker_configuration.go | 52 +++++- .../worker_configuration_data_source.go | 3 + .../worker_configuration_data_source_test.go | 5 + .../kafkaconnect/worker_configuration_test.go | 134 +++++++++++++- .../docs/d/mskconnect_connector.html.markdown | 1 + .../d/mskconnect_custom_plugin.html.markdown | 1 + ...connect_worker_configuration.html.markdown | 1 + .../docs/r/mskconnect_connector.html.markdown | 117 ++++++++---- .../r/mskconnect_custom_plugin.html.markdown | 26 ++- ...connect_worker_configuration.html.markdown | 14 +- 24 files changed, 817 insertions(+), 68 deletions(-) create mode 100644 .changelog/38270.txt create mode 100644 internal/service/kafkaconnect/tags_gen.go diff --git a/.changelog/38270.txt b/.changelog/38270.txt new file mode 100644 index 00000000000..e00bc9d89a7 --- /dev/null +++ b/.changelog/38270.txt @@ -0,0 +1,27 @@ +```release-note:enhancement +resource/aws_mskconnect_connector: Add `tags` argument and `tags_all` attribute +``` + +```release-note:enhancement +resource/aws_mskconnect_custom_plugin: Add `tags` argument and `tags_all` attribute +``` + +```release-note:enhancement +resource/aws_mskconnect_worker_configuration: Add `tags` argument and `tags_all` attribute +``` + +```release-note:enhancement +resource/aws_mskconnect_worker_configuration: Add resource deletion logic +``` + +```release-note:enhancement +data-source/aws_mskconnect_connector: Add `tags` attribute +``` + +```release-note:enhancement +data-source/aws_mskconnect_custom_plugin: Add `tags` attribute +``` + +```release-note:enhancement +data-source/aws_mskconnect_worker_configuration: Add `tags` attribute +``` diff --git a/internal/service/kafkaconnect/connector.go b/internal/service/kafkaconnect/connector.go index 9f53e2c9ade..ea7837c2a8b 100644 --- a/internal/service/kafkaconnect/connector.go +++ b/internal/service/kafkaconnect/connector.go @@ -17,12 +17,14 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_mskconnect_connector") +// @Tags(identifierAttribute="arn") func ResourceConnector() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceConnectorCreate, @@ -358,6 +360,8 @@ func ResourceConnector() *schema.Resource { ForceNew: true, ValidateFunc: 
verify.ValidARN, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrVersion: { Type: schema.TypeString, Computed: true, @@ -384,6 +388,8 @@ func ResourceConnector() *schema.Resource { }, }, }, + + CustomizeDiff: verify.SetTagsDiff, } } @@ -403,6 +409,7 @@ func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta i KafkaConnectVersion: aws.String(d.Get("kafkaconnect_version").(string)), Plugins: expandPlugins(d.Get("plugin").(*schema.Set).List()), ServiceExecutionRoleArn: aws.String(d.Get("service_execution_role_arn").(string)), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk(names.AttrDescription); ok { @@ -513,23 +520,25 @@ func resourceConnectorUpdate(ctx context.Context, d *schema.ResourceData, meta i conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - input := &kafkaconnect.UpdateConnectorInput{ - Capacity: expandCapacityUpdate(d.Get("capacity").([]interface{})[0].(map[string]interface{})), - ConnectorArn: aws.String(d.Id()), - CurrentVersion: aws.String(d.Get(names.AttrVersion).(string)), - } + if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + input := &kafkaconnect.UpdateConnectorInput{ + Capacity: expandCapacityUpdate(d.Get("capacity").([]interface{})[0].(map[string]interface{})), + ConnectorArn: aws.String(d.Id()), + CurrentVersion: aws.String(d.Get(names.AttrVersion).(string)), + } - log.Printf("[DEBUG] Updating MSK Connect Connector: %s", input) - _, err := conn.UpdateConnectorWithContext(ctx, input) + log.Printf("[DEBUG] Updating MSK Connect Connector: %s", input) + _, err := conn.UpdateConnectorWithContext(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating MSK Connect Connector (%s): %s", d.Id(), err) - } + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating MSK Connect Connector (%s): %s", d.Id(), err) + } - _, err = waitConnectorUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + _, err = waitConnectorUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) update: %s", d.Id(), err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) update: %s", d.Id(), err) + } } return append(diags, resourceConnectorRead(ctx, d, meta)...) 
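The guarded update above is the provider's transparent tagging pattern: the UpdateConnector API call now runs only when something other than `tags`/`tags_all` changed, while tag-only changes are applied by the generated tagging code that the `@Tags(identifierAttribute="arn")` annotation wires up (see `tags_gen.go` later in this patch). A minimal sketch of the resulting Update shape; `resourceExampleUpdate`/`resourceExampleRead` and the elided service call are hypothetical placeholders, not code from this series:

    import (
        "context"

        "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
        "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
        "github.com/hashicorp/terraform-provider-aws/names"
    )

    func resourceExampleUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
        var diags diag.Diagnostics

        // Call the service's update API only when a non-tag argument changed.
        if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) {
            // ... build the update input, call the API, wait for completion ...
        }

        // Tag-only diffs are handled by the generated updateTags code, so all
        // that remains is refreshing state.
        return append(diags, resourceExampleRead(ctx, d, meta)...)
    }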
diff --git a/internal/service/kafkaconnect/connector_data_source.go b/internal/service/kafkaconnect/connector_data_source.go index 7526daeb9f9..2ba8a5820a6 100644 --- a/internal/service/kafkaconnect/connector_data_source.go +++ b/internal/service/kafkaconnect/connector_data_source.go @@ -12,11 +12,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKDataSource("aws_mskconnect_connector") +// @Tags(identifierAttribute="arn") func DataSourceConnector() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceConnectorRead, @@ -38,6 +40,7 @@ func DataSourceConnector() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchemaComputed(), }, } } diff --git a/internal/service/kafkaconnect/connector_data_source_test.go b/internal/service/kafkaconnect/connector_data_source_test.go index 6c7abd35e50..036628be5b6 100644 --- a/internal/service/kafkaconnect/connector_data_source_test.go +++ b/internal/service/kafkaconnect/connector_data_source_test.go @@ -32,6 +32,7 @@ func TestAccKafkaConnectConnectorDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, names.AttrDescription, dataSourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrName), resource.TestCheckResourceAttrPair(resourceName, names.AttrVersion, dataSourceName, names.AttrVersion), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, dataSourceName, names.AttrTags), ), }, }, diff --git a/internal/service/kafkaconnect/connector_test.go b/internal/service/kafkaconnect/connector_test.go index 6ccfd01e66a..12f854c9769 100644 --- a/internal/service/kafkaconnect/connector_test.go +++ b/internal/service/kafkaconnect/connector_test.go @@ -68,6 +68,8 @@ func TestAccKafkaConnectConnector_basic(t *testing.T) { "custom_plugin.#": acctest.Ct1, }), resource.TestCheckResourceAttrSet(resourceName, "service_execution_role_arn"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), resource.TestCheckResourceAttrSet(resourceName, names.AttrVersion), resource.TestCheckResourceAttr(resourceName, "worker_configuration.#", acctest.Ct0), ), @@ -229,6 +231,51 @@ func TestAccKafkaConnectConnector_update(t *testing.T) { }) } +func TestAccKafkaConnectConnector_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_mskconnect_connector.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), + CheckDestroy: testAccCheckConnectorDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccConnectorConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, 
acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccConnectorConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccConnectorConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func testAccCheckConnectorExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -396,7 +443,7 @@ EOF resource "aws_msk_cluster" "test" { cluster_name = %[1]q - kafka_version = "2.2.1" + kafka_version = "2.7.1" number_of_broker_nodes = 3 broker_node_group_info { @@ -465,6 +512,10 @@ resource "aws_mskconnect_connector" "test" { service_execution_role_arn = aws_iam_role.test.arn + tags = { + key1 = "value1" + } + depends_on = [aws_iam_role_policy.test, aws_vpc_endpoint.test] } `, rName)) @@ -643,3 +694,124 @@ resource "aws_mskconnect_connector" "test" { } `, rName)) } + +func testAccConnectorConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose( + testAccCustomPluginConfig_basic(rName), + testAccConnectorBaseConfig(rName), + fmt.Sprintf(` +resource "aws_mskconnect_connector" "test" { + name = %[1]q + + kafkaconnect_version = "2.7.1" + + capacity { + autoscaling { + min_worker_count = 1 + max_worker_count = 2 + } + } + + connector_configuration = { + "connector.class" = "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector" + "tasks.max" = "1" + "topics" = "t1" + } + + kafka_cluster { + apache_kafka_cluster { + bootstrap_servers = aws_msk_cluster.test.bootstrap_brokers_tls + + vpc { + security_groups = [aws_security_group.test.id] + subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + } + } + } + + kafka_cluster_client_authentication { + authentication_type = "NONE" + } + + kafka_cluster_encryption_in_transit { + encryption_type = "TLS" + } + + plugin { + custom_plugin { + arn = aws_mskconnect_custom_plugin.test.arn + revision = aws_mskconnect_custom_plugin.test.latest_revision + } + } + + service_execution_role_arn = aws_iam_role.test.arn + + tags = { + %[2]q = %[3]q + } + + depends_on = [aws_iam_role_policy.test, aws_vpc_endpoint.test] +} +`, rName, tagKey1, tagValue1)) +} + +func testAccConnectorConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose( + testAccCustomPluginConfig_basic(rName), + testAccConnectorBaseConfig(rName), + fmt.Sprintf(` +resource "aws_mskconnect_connector" "test" { + name = %[1]q + + kafkaconnect_version = "2.7.1" + + capacity { + autoscaling { + min_worker_count = 1 + max_worker_count = 2 + } + } + + connector_configuration = { + "connector.class" = 
"com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector" + "tasks.max" = "1" + "topics" = "t1" + } + + kafka_cluster { + apache_kafka_cluster { + bootstrap_servers = aws_msk_cluster.test.bootstrap_brokers_tls + + vpc { + security_groups = [aws_security_group.test.id] + subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + } + } + } + + kafka_cluster_client_authentication { + authentication_type = "NONE" + } + + kafka_cluster_encryption_in_transit { + encryption_type = "TLS" + } + + plugin { + custom_plugin { + arn = aws_mskconnect_custom_plugin.test.arn + revision = aws_mskconnect_custom_plugin.test.latest_revision + } + } + + service_execution_role_arn = aws_iam_role.test.arn + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } + + depends_on = [aws_iam_role_policy.test, aws_vpc_endpoint.test] +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/kafkaconnect/custom_plugin.go b/internal/service/kafkaconnect/custom_plugin.go index 2ec5438e504..e2b0884d748 100644 --- a/internal/service/kafkaconnect/custom_plugin.go +++ b/internal/service/kafkaconnect/custom_plugin.go @@ -16,16 +16,19 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_mskconnect_custom_plugin") +// @Tags(identifierAttribute="arn") func ResourceCustomPlugin() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCustomPluginCreate, ReadWithoutTimeout: resourceCustomPluginRead, + UpdateWithoutTimeout: resourceCustomPluginUpdate, DeleteWithoutTimeout: resourceCustomPluginDelete, Importer: &schema.ResourceImporter{ @@ -102,7 +105,11 @@ func ResourceCustomPlugin() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), }, + + CustomizeDiff: verify.SetTagsDiff, } } @@ -116,6 +123,7 @@ func resourceCustomPluginCreate(ctx context.Context, d *schema.ResourceData, met ContentType: aws.String(d.Get(names.AttrContentType).(string)), Location: expandCustomPluginLocation(d.Get(names.AttrLocation).([]interface{})[0].(map[string]interface{})), Name: aws.String(name), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk(names.AttrDescription); ok { @@ -181,6 +189,14 @@ func resourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, meta return diags } +func resourceCustomPluginUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + // This update function is for updating tags only - there is no update action for this resource + + return append(diags, resourceCustomPluginRead(ctx, d, meta)...) 
+} + func resourceCustomPluginDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics diff --git a/internal/service/kafkaconnect/custom_plugin_data_source.go b/internal/service/kafkaconnect/custom_plugin_data_source.go index 792bd560029..f4dd6909226 100644 --- a/internal/service/kafkaconnect/custom_plugin_data_source.go +++ b/internal/service/kafkaconnect/custom_plugin_data_source.go @@ -12,11 +12,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKDataSource("aws_mskconnect_custom_plugin") +// @Tags(identifierAttribute="arn") func DataSourceCustomPlugin() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceCustomPluginRead, @@ -42,6 +44,7 @@ func DataSourceCustomPlugin() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchemaComputed(), }, } } diff --git a/internal/service/kafkaconnect/custom_plugin_data_source_test.go b/internal/service/kafkaconnect/custom_plugin_data_source_test.go index b6ba398afc9..872c735aa5f 100644 --- a/internal/service/kafkaconnect/custom_plugin_data_source_test.go +++ b/internal/service/kafkaconnect/custom_plugin_data_source_test.go @@ -34,6 +34,7 @@ func TestAccKafkaConnectCustomPluginDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "latest_revision", dataSourceName, "latest_revision"), resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrName), resource.TestCheckResourceAttrPair(resourceName, names.AttrState, dataSourceName, names.AttrState), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, dataSourceName, names.AttrTags), ), }, }, @@ -52,6 +53,10 @@ resource "aws_mskconnect_custom_plugin" "test" { file_key = aws_s3_object.test.key } } + + tags = { + key1 = "value1" + } } data "aws_mskconnect_custom_plugin" "test" { diff --git a/internal/service/kafkaconnect/custom_plugin_test.go b/internal/service/kafkaconnect/custom_plugin_test.go index 86a6fc1ee85..3e8ab798303 100644 --- a/internal/service/kafkaconnect/custom_plugin_test.go +++ b/internal/service/kafkaconnect/custom_plugin_test.go @@ -106,6 +106,51 @@ func TestAccKafkaConnectCustomPlugin_description(t *testing.T) { }) } +func TestAccKafkaConnectCustomPlugin_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_mskconnect_custom_plugin.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), + CheckDestroy: testAccCheckCustomPluginDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccCustomPluginConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPluginExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCustomPluginConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPluginExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccCustomPluginConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPluginExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func TestAccKafkaConnectCustomPlugin_objectVersion(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -251,3 +296,44 @@ resource "aws_mskconnect_custom_plugin" "test" { } `, rName)) } + +func testAccCustomPluginConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccCustomPluginBaseConfig(rName, false), fmt.Sprintf(` +resource "aws_mskconnect_custom_plugin" "test" { + name = %[1]q + content_type = "ZIP" + + location { + s3 { + bucket_arn = aws_s3_bucket.test.arn + file_key = aws_s3_object.test.key + } + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccCustomPluginConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccCustomPluginBaseConfig(rName, false), fmt.Sprintf(` +resource "aws_mskconnect_custom_plugin" "test" { + name = %[1]q + content_type = "ZIP" + + location { + s3 { + bucket_arn = aws_s3_bucket.test.arn + file_key = aws_s3_object.test.key + } + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/kafkaconnect/generate.go b/internal/service/kafkaconnect/generate.go index aeb7bedb679..dc3570a57c3 100644 --- a/internal/service/kafkaconnect/generate.go +++ b/internal/service/kafkaconnect/generate.go @@ -2,6 +2,7 @@ // SPDX-License-Identifier: MPL-2.0 //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/tags/main.go -ServiceTagsMap -ListTags -UpdateTags // ONLY generate directives and package declaration! Do not add anything else to this file. 
package kafkaconnect diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index 034abdf10f6..a9e3982d9a8 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -29,14 +29,23 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac { Factory: DataSourceConnector, TypeName: "aws_mskconnect_connector", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, { Factory: DataSourceCustomPlugin, TypeName: "aws_mskconnect_custom_plugin", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, { Factory: DataSourceWorkerConfiguration, TypeName: "aws_mskconnect_worker_configuration", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, } } @@ -46,14 +55,23 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka { Factory: ResourceConnector, TypeName: "aws_mskconnect_connector", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, { Factory: ResourceCustomPlugin, TypeName: "aws_mskconnect_custom_plugin", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, { Factory: ResourceWorkerConfiguration, TypeName: "aws_mskconnect_worker_configuration", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, } } diff --git a/internal/service/kafkaconnect/status.go b/internal/service/kafkaconnect/status.go index acb2f5e6596..ecd768f427f 100644 --- a/internal/service/kafkaconnect/status.go +++ b/internal/service/kafkaconnect/status.go @@ -43,3 +43,19 @@ func statusCustomPluginState(ctx context.Context, conn *kafkaconnect.KafkaConnec return output, aws.StringValue(output.CustomPluginState), nil } } + +func statusWorkerConfigurationState(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindWorkerConfigurationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.WorkerConfigurationState), nil + } +} diff --git a/internal/service/kafkaconnect/tags_gen.go b/internal/service/kafkaconnect/tags_gen.go new file mode 100644 index 00000000000..2d0344981e5 --- /dev/null +++ b/internal/service/kafkaconnect/tags_gen.go @@ -0,0 +1,129 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package kafkaconnect + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go/service/kafkaconnect/kafkaconnectiface" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists kafkaconnect service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func listTags(ctx context.Context, conn kafkaconnectiface.KafkaConnectAPI, identifier string) (tftags.KeyValueTags, error) { + input := &kafkaconnect.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResourceWithContext(ctx, input) + + if err != nil { + return tftags.New(ctx, nil), err + } + + return KeyValueTags(ctx, output.Tags), nil +} + +// ListTags lists kafkaconnect service tags and set them in Context. +// It is called from outside this package. +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).KafkaConnectConn(ctx), identifier) + + if err != nil { + return err + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// map[string]*string handling + +// Tags returns kafkaconnect service tags. +func Tags(tags tftags.KeyValueTags) map[string]*string { + return aws.StringMap(tags.Map()) +} + +// KeyValueTags creates tftags.KeyValueTags from kafkaconnect service tags. +func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { + return tftags.New(ctx, tags) +} + +// getTagsIn returns kafkaconnect service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) map[string]*string { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets kafkaconnect service tags in Context. +func setTagsOut(ctx context.Context, tags map[string]*string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) + } +} + +// updateTags updates kafkaconnect service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func updateTags(ctx context.Context, conn kafkaconnectiface.KafkaConnectAPI, identifier string, oldTagsMap, newTagsMap any) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.KafkaConnect) + if len(removedTags) > 0 { + input := &kafkaconnect.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: aws.StringSlice(removedTags.Keys()), + } + + _, err := conn.UntagResourceWithContext(ctx, input) + + if err != nil { + return fmt.Errorf("untagging resource (%s): %w", identifier, err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.KafkaConnect) + if len(updatedTags) > 0 { + input := &kafkaconnect.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: Tags(updatedTags), + } + + _, err := conn.TagResourceWithContext(ctx, input) + + if err != nil { + return fmt.Errorf("tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// UpdateTags updates kafkaconnect service tags. +// It is called from outside this package. 
+func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).KafkaConnectConn(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/kafkaconnect/wait.go b/internal/service/kafkaconnect/wait.go index 944a30c5c44..4dca5c5366b 100644 --- a/internal/service/kafkaconnect/wait.go +++ b/internal/service/kafkaconnect/wait.go @@ -114,3 +114,20 @@ func waitCustomPluginDeleted(ctx context.Context, conn *kafkaconnect.KafkaConnec return nil, err } + +func waitWorkerConfigurationDeleted(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{kafkaconnect.WorkerConfigurationStateDeleting}, + Target: []string{}, + Refresh: statusWorkerConfigurationState(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeWorkerConfigurationOutput); ok { + return output, err + } + + return nil, err +} diff --git a/internal/service/kafkaconnect/worker_configuration.go b/internal/service/kafkaconnect/worker_configuration.go index ced642449ad..7f76fee9b8a 100644 --- a/internal/service/kafkaconnect/worker_configuration.go +++ b/internal/service/kafkaconnect/worker_configuration.go @@ -6,30 +6,40 @@ package kafkaconnect import ( "context" "log" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" itypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_mskconnect_worker_configuration") +// @Tags(identifierAttribute="arn") func ResourceWorkerConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceWorkerConfigurationCreate, ReadWithoutTimeout: resourceWorkerConfigurationRead, - DeleteWithoutTimeout: schema.NoopContext, + UpdateWithoutTimeout: resourceWorkerConfigurationUpdate, + DeleteWithoutTimeout: resourceWorkerConfigurationDelete, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -62,7 +72,11 @@ func ResourceWorkerConfiguration() *schema.Resource { } }, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), }, + + CustomizeDiff: verify.SetTagsDiff, } } @@ -75,6 +89,7 @@ func resourceWorkerConfigurationCreate(ctx context.Context, d *schema.ResourceDa input := &kafkaconnect.CreateWorkerConfigurationInput{ Name: aws.String(name), PropertiesFileContent: flex.StringValueToBase64String(d.Get("properties_file_content").(string)), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk(names.AttrDescription); ok { @@ 
-125,6 +140,14 @@ func resourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData return diags } +func resourceWorkerConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + // This update function is for updating tags only - there is no update action for this resource + + return append(diags, resourceWorkerConfigurationRead(ctx, d, meta)...) +} + func decodePropertiesFileContent(content string) string { v, err := itypes.Base64Decode(content) if err != nil { @@ -133,3 +156,30 @@ func decodePropertiesFileContent(content string) string { return string(v) } + +func resourceWorkerConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + + log.Printf("[DEBUG] Deleting MSK Connect Worker Configuration: %s", d.Id()) + _, err := conn.DeleteWorkerConfigurationWithContext(ctx, &kafkaconnect.DeleteWorkerConfigurationInput{ + WorkerConfigurationArn: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting MSK Connect Worker Configuration (%s): %s", d.Id(), err) + } + + _, err = waitWorkerConfigurationDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Worker Configuration (%s) delete: %s", d.Id(), err) + } + + return diags +} diff --git a/internal/service/kafkaconnect/worker_configuration_data_source.go b/internal/service/kafkaconnect/worker_configuration_data_source.go index 6f993a75512..73c4d073e69 100644 --- a/internal/service/kafkaconnect/worker_configuration_data_source.go +++ b/internal/service/kafkaconnect/worker_configuration_data_source.go @@ -12,11 +12,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKDataSource("aws_mskconnect_worker_configuration") +// @Tags(identifierAttribute="arn") func DataSourceWorkerConfiguration() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceWorkerConfigurationRead, @@ -42,6 +44,7 @@ func DataSourceWorkerConfiguration() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchemaComputed(), }, } } diff --git a/internal/service/kafkaconnect/worker_configuration_data_source_test.go b/internal/service/kafkaconnect/worker_configuration_data_source_test.go index fbff029753e..9392249f2d7 100644 --- a/internal/service/kafkaconnect/worker_configuration_data_source_test.go +++ b/internal/service/kafkaconnect/worker_configuration_data_source_test.go @@ -34,6 +34,7 @@ func TestAccKafkaConnectWorkerConfigurationDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "latest_revision", dataSourceName, "latest_revision"), resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrName), resource.TestCheckResourceAttrPair(resourceName, "properties_file_content", dataSourceName, "properties_file_content"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, 
dataSourceName, names.AttrTags), ), }, }, @@ -49,6 +50,10 @@ resource "aws_mskconnect_worker_configuration" "test" { key.converter=org.apache.kafka.connect.storage.StringConverter value.converter=org.apache.kafka.connect.storage.StringConverter EOF + + tags = { + key1 = "value1" + } } data "aws_mskconnect_worker_configuration" "test" { diff --git a/internal/service/kafkaconnect/worker_configuration_test.go b/internal/service/kafkaconnect/worker_configuration_test.go index 10123831093..939b05eb9e0 100644 --- a/internal/service/kafkaconnect/worker_configuration_test.go +++ b/internal/service/kafkaconnect/worker_configuration_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfkafkaconnect "github.com/hashicorp/terraform-provider-aws/internal/service/kafkaconnect" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +27,7 @@ func TestAccKafkaConnectWorkerConfiguration_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, + CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -48,6 +49,29 @@ func TestAccKafkaConnectWorkerConfiguration_basic(t *testing.T) { }) } +func TestAccKafkaConnectWorkerConfiguration_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_mskconnect_worker_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), + CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccWorkerConfigurationConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkerConfigurationExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfkafkaconnect.ResourceWorkerConfiguration(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccKafkaConnectWorkerConfiguration_description(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -56,7 +80,7 @@ func TestAccKafkaConnectWorkerConfiguration_description(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, + CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -75,6 +99,51 @@ func TestAccKafkaConnectWorkerConfiguration_description(t *testing.T) { }) } +func TestAccKafkaConnectWorkerConfiguration_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_mskconnect_worker_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, 
t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), + CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccWorkerConfigurationConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkerConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccWorkerConfigurationConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkerConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccWorkerConfigurationConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkerConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func testAccCheckWorkerConfigurationExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -94,6 +163,32 @@ func testAccCheckWorkerConfigurationExists(ctx context.Context, n string) resour } } +func testAccCheckWorkerConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_mskconnect_worker_configuration" { + continue + } + + _, err := tfkafkaconnect.FindWorkerConfigurationByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("MSK Connect Worker Configuration %s still exists", rs.Primary.ID) + } + + return nil + } +} + func testAccWorkerConfigurationConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_mskconnect_worker_configuration" "test" { @@ -120,3 +215,38 @@ EOF } `, rName) } + +func testAccWorkerConfigurationConfig_tags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_mskconnect_worker_configuration" "test" { + name = %[1]q + + properties_file_content = < Date: Mon, 8 Jul 2024 08:47:42 -0400 Subject: [PATCH 02/16] kafkaconnect: Use AWS SDK for Go v2. 
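Flipping `client_version` to `[2]` in names_data.hcl and adding `-AWSSDKVersion=2` to the tags generator directive drives the regeneration in the following commits: the service client, endpoint resolver, and tags code are re-emitted against the AWS SDK for Go v2. As a reference for the v2 call style that the regenerated code adopts, here is an illustrative sketch (the helper and its paginator usage are assumptions based on the SDK's generated API, not code from this series):

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/kafkaconnect"
    )

    // SDK v2 clients are built from an aws.Config rather than a *session.Session,
    // and operations take a context directly instead of *WithContext variants.
    func listConnectorNames(ctx context.Context) ([]string, error) {
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            return nil, err
        }
        conn := kafkaconnect.NewFromConfig(cfg)

        var connectorNames []string
        pages := kafkaconnect.NewListConnectorsPaginator(conn, &kafkaconnect.ListConnectorsInput{})
        for pages.HasMorePages() {
            page, err := pages.NextPage(ctx)
            if err != nil {
                return nil, err
            }
            for _, c := range page.Connectors {
                connectorNames = append(connectorNames, aws.ToString(c.ConnectorName))
            }
        }
        return connectorNames, nil
    }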
--- internal/service/kafkaconnect/generate.go | 2 +- names/data/names_data.hcl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/kafkaconnect/generate.go b/internal/service/kafkaconnect/generate.go index dc3570a57c3..4d34ba3cbac 100644 --- a/internal/service/kafkaconnect/generate.go +++ b/internal/service/kafkaconnect/generate.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: MPL-2.0 //go:generate go run ../../generate/servicepackage/main.go -//go:generate go run ../../generate/tags/main.go -ServiceTagsMap -ListTags -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -SkipTypesImp=true -ServiceTagsMap -ListTags -UpdateTags // ONLY generate directives and package declaration! Do not add anything else to this file. package kafkaconnect diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl index 3a398a818dd..44c32042620 100644 --- a/names/data/names_data.hcl +++ b/names/data/names_data.hcl @@ -5941,7 +5941,7 @@ service "kafkaconnect" { sdk { id = "KafkaConnect" - client_version = [1] + client_version = [2] } names { From 7cd7412972742be9e4290974f351b37c50783726 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 08:48:00 -0400 Subject: [PATCH 03/16] Run 'make gen'. --- internal/conns/awsclient_gen.go | 6 +- .../service_endpoint_resolver_gen.go | 66 ++++---- .../service_endpoints_gen_test.go | 146 +++++++++++++++--- .../kafkaconnect/service_package_gen.go | 28 ++-- internal/service/kafkaconnect/tags_gen.go | 21 ++- 5 files changed, 179 insertions(+), 88 deletions(-) diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 47d9d2250f8..926c79c23a5 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -112,6 +112,7 @@ import ( iotevents_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotevents" ivschat_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivschat" kafka_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafka" + kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" kendra_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kendra" keyspaces_sdkv2 "github.com/aws/aws-sdk-go-v2/service/keyspaces" kinesis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kinesis" @@ -223,7 +224,6 @@ import ( imagebuilder_sdkv1 "github.com/aws/aws-sdk-go/service/imagebuilder" inspector_sdkv1 "github.com/aws/aws-sdk-go/service/inspector" ivs_sdkv1 "github.com/aws/aws-sdk-go/service/ivs" - kafkaconnect_sdkv1 "github.com/aws/aws-sdk-go/service/kafkaconnect" kinesisanalytics_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisanalytics" kinesisanalyticsv2_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisanalyticsv2" kinesisvideo_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisvideo" @@ -794,8 +794,8 @@ func (c *AWSClient) KafkaClient(ctx context.Context) *kafka_sdkv2.Client { return errs.Must(client[*kafka_sdkv2.Client](ctx, c, names.Kafka, make(map[string]any))) } -func (c *AWSClient) KafkaConnectConn(ctx context.Context) *kafkaconnect_sdkv1.KafkaConnect { - return errs.Must(conn[*kafkaconnect_sdkv1.KafkaConnect](ctx, c, names.KafkaConnect, make(map[string]any))) +func (c *AWSClient) KafkaConnectClient(ctx context.Context) *kafkaconnect_sdkv2.Client { + return errs.Must(client[*kafkaconnect_sdkv2.Client](ctx, c, names.KafkaConnect, make(map[string]any))) } func (c *AWSClient) KendraClient(ctx context.Context) *kendra_sdkv2.Client { diff --git a/internal/service/kafkaconnect/service_endpoint_resolver_gen.go b/internal/service/kafkaconnect/service_endpoint_resolver_gen.go index 
07d2eb7068a..ec9c3ea8ab5 100644 --- a/internal/service/kafkaconnect/service_endpoint_resolver_gen.go +++ b/internal/service/kafkaconnect/service_endpoint_resolver_gen.go @@ -6,65 +6,63 @@ import ( "context" "fmt" "net" - "net/url" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -var _ endpoints_sdkv1.Resolver = resolverSDKv1{} +var _ kafkaconnect_sdkv2.EndpointResolverV2 = resolverSDKv2{} -type resolverSDKv1 struct { - ctx context.Context +type resolverSDKv2 struct { + defaultResolver kafkaconnect_sdkv2.EndpointResolverV2 } -func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { - return resolverSDKv1{ - ctx: ctx, +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: kafkaconnect_sdkv2.NewDefaultEndpointResolverV2(), } } -func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { - ctx := r.ctx +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params kafkaconnect_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) - var opt endpoints_sdkv1.Options - opt.Set(opts...) - - useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) - defaultResolver := endpoints_sdkv1.DefaultResolver() + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } - if useFIPS { + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) - endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) if err != nil { return endpoint, err } tflog.Debug(ctx, "endpoint resolved", map[string]any{ - "tf_aws.endpoint": endpoint.URL, + "tf_aws.endpoint": endpoint.URI.String(), }) - var endpointURL *url.URL - endpointURL, err = url.Parse(endpoint.URL) - if err != nil { - return endpoint, err - } - - hostname := endpointURL.Hostname() + hostname := endpoint.URI.Hostname() _, err = net.LookupHost(hostname) if err != nil { if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ "tf_aws.hostname": hostname, }) - opts = append(opts, func(o *endpoints_sdkv1.Options) { - o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - }) + params.UseFIPS = aws_sdkv2.Bool(false) } else { - err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up kafkaconnect endpoint %q: %s", hostname, err) return } } else { @@ -72,5 +70,13 @@ func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoin } } - return defaultResolver.EndpointFor(service, region, opts...) 
+ return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*kafkaconnect_sdkv2.Options) { + return func(o *kafkaconnect_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } } diff --git a/internal/service/kafkaconnect/service_endpoints_gen_test.go b/internal/service/kafkaconnect/service_endpoints_gen_test.go index fccafb6be6a..64f13b5747d 100644 --- a/internal/service/kafkaconnect/service_endpoints_gen_test.go +++ b/internal/service/kafkaconnect/service_endpoints_gen_test.go @@ -4,18 +4,22 @@ package kafkaconnect_test import ( "context" + "errors" "fmt" "maps" "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - kafkaconnect_sdkv1 "github.com/aws/aws-sdk-go/service/kafkaconnect" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -240,54 +244,63 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := kafkaconnect_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(kafkaconnect_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), kafkaconnect_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func defaultFIPSEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := kafkaconnect_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(kafkaconnect_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), kafkaconnect_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.KafkaConnectConn(ctx) + client := meta.KafkaConnectClient(ctx) - req, _ := client.ListConnectorsRequest(&kafkaconnect_sdkv1.ListConnectorsInput{}) + var result apiCallParams - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListConnectors(ctx, &kafkaconnect_sdkv2.ListConnectorsInput{}, + func(opts *kafkaconnect_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return 
result } func withNoConfig(_ *caseSetup) { @@ -466,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index a9e3982d9a8..fc9f8a03373 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -5,10 +5,8 @@ package kafkaconnect import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - kafkaconnect_sdkv1 "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -80,22 +78,14 @@ func (p *servicePackage) ServicePackageName() string { return names.KafkaConnect } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*kafkaconnect_sdkv1.KafkaConnect, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*kafkaconnect_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - } else { - cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) - } - - return kafkaconnect_sdkv1.New(sess.Copy(&cfg)), nil + return kafkaconnect_sdkv2.NewFromConfig(cfg, + kafkaconnect_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/kafkaconnect/tags_gen.go b/internal/service/kafkaconnect/tags_gen.go index 2d0344981e5..eb6ea9aeeab 100644 --- a/internal/service/kafkaconnect/tags_gen.go +++ b/internal/service/kafkaconnect/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/aws/aws-sdk-go/service/kafkaconnect/kafkaconnectiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists kafkaconnect service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn kafkaconnectiface.KafkaConnectAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *kafkaconnect.Client, identifier string, optFns ...func(*kafkaconnect.Options)) (tftags.KeyValueTags, error) { input := &kafkaconnect.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn kafkaconnectiface.KafkaConnectAPI, ident // ListTags lists kafkaconnect service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).KafkaConnectConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).KafkaConnectClient(ctx), identifier) if err != nil { return err @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates kafkaconnect service tags. 
// The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn kafkaconnectiface.KafkaConnectAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *kafkaconnect.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*kafkaconnect.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn kafkaconnectiface.KafkaConnectAPI, ide if len(removedTags) > 0 { input := &kafkaconnect.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn kafkaconnectiface.KafkaConnectAPI, ide Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn kafkaconnectiface.KafkaConnectAPI, ide // UpdateTags updates kafkaconnect service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).KafkaConnectConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).KafkaConnectClient(ctx), identifier, oldTags, newTags) } From f4f71b08e74331053d077ffd04b3344fdca87af4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 08:52:45 -0400 Subject: [PATCH 04/16] Run 'go get github.com/aws/aws-sdk-go-v2/service/kafkaconnect@v1.19.1 && go mod tidy'. 
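For reviewers unfamiliar with the module this patch pulls in: a minimal, illustrative sketch of constructing the SDK for Go v2 kafkaconnect client and listing connectors. This is not provider code — it assumes default credential/region resolution via `config.LoadDefaultConfig`; the provider itself wires the client up in `service_package_gen.go` with `WithEndpointResolverV2` and a base-endpoint option, as shown earlier in this series.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kafkaconnect"
)

func main() {
	// Resolve region and credentials from the default chain; this sketch
	// stands in for the provider's own client construction.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}

	client := kafkaconnect.NewFromConfig(cfg)

	// v2 API calls are context-first, with no *WithContext suffixes as in
	// SDK v1, and return value slices rather than slices of pointers.
	out, err := client.ListConnectors(context.TODO(), &kafkaconnect.ListConnectorsInput{})
	if err != nil {
		log.Fatal(err)
	}

	for _, c := range out.Connectors {
		fmt.Println(aws.ToString(c.ConnectorName))
	}
}
```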
--- go.mod | 1 + go.sum | 2 ++ 2 files changed, 3 insertions(+) diff --git a/go.mod b/go.mod index d071da72677..1246260c2ab 100644 --- a/go.mod +++ b/go.mod @@ -120,6 +120,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.1 github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.1 github.com/aws/aws-sdk-go-v2/service/kafka v1.35.1 + github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.1 github.com/aws/aws-sdk-go-v2/service/kendra v1.52.1 github.com/aws/aws-sdk-go-v2/service/keyspaces v1.12.1 github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.1 diff --git a/go.sum b/go.sum index c1a5460f4cc..3c6cac08837 100644 --- a/go.sum +++ b/go.sum @@ -270,6 +270,8 @@ github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.1 h1:rmVLJaE6iqVSSeipZnhul8BM github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.1/go.mod h1:IUQ9qdszWBPacNZ36JLkmOxGx/2LCzz/DOZjpg/8tz4= github.com/aws/aws-sdk-go-v2/service/kafka v1.35.1 h1:Q4Jr/gf+7LHjBFTdecQJn4ugVoVszCHzyq1EztrHHkc= github.com/aws/aws-sdk-go-v2/service/kafka v1.35.1/go.mod h1:7/xNH8gqz3k4p4OyW5+s8ecOp1Xg+vBTPMKpMNjeeik= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.1 h1:mtbyDAlKv06I6KWTcbhKEUPe51Vinm5RbcmzYI8UWPw= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.1/go.mod h1:8XzP4poz+dq07pMe/w+9707EuHp/5lP0OACGvQnVnlo= github.com/aws/aws-sdk-go-v2/service/kendra v1.52.1 h1:mVfjPIVnnSYLWnGYPrTrPlXohOMmYvQpjmYiBLnTh4M= github.com/aws/aws-sdk-go-v2/service/kendra v1.52.1/go.mod h1:mjdRASBq4jLAUxSSBk4jvXdyciq6hZl162yoJ1+BTdc= github.com/aws/aws-sdk-go-v2/service/keyspaces v1.12.1 h1:OmIPCAjROLz+AJyzvNpJRD4cenApFQJAeAIWrkRXOo8= From 7b30dee6da8eedba5bb6dacc4ece2c3401cb894b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 10:20:03 -0400 Subject: [PATCH 05/16] kafkaconnect: Correct tagging generation. --- internal/service/kafkaconnect/generate.go | 2 +- internal/service/kafkaconnect/tags_gen.go | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/service/kafkaconnect/generate.go b/internal/service/kafkaconnect/generate.go index 4d34ba3cbac..8d7866f815e 100644 --- a/internal/service/kafkaconnect/generate.go +++ b/internal/service/kafkaconnect/generate.go @@ -2,7 +2,7 @@ // SPDX-License-Identifier: MPL-2.0 //go:generate go run ../../generate/servicepackage/main.go -//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -SkipTypesImp=true -ServiceTagsMap -ListTags -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ServiceTagsMap -ListTags -UpdateTags -KVTValues -SkipTypesImp // ONLY generate directives and package declaration! Do not add anything else to this file. package kafkaconnect diff --git a/internal/service/kafkaconnect/tags_gen.go b/internal/service/kafkaconnect/tags_gen.go index eb6ea9aeeab..27bd2906e04 100644 --- a/internal/service/kafkaconnect/tags_gen.go +++ b/internal/service/kafkaconnect/tags_gen.go @@ -48,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns kafkaconnect service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from kafkaconnect service tags. 
-func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns kafkaconnect service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -73,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets kafkaconnect service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } From b270cf3f257074aac7d833bc50c57114c200dd38 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 10:47:41 -0400 Subject: [PATCH 06/16] Add 'names.KafkaConnectEndpointID'. --- names/names.go | 1 + 1 file changed, 1 insertion(+) diff --git a/names/names.go b/names/names.go index 2936f8697ad..4ceb5ef3e06 100644 --- a/names/names.go +++ b/names/names.go @@ -76,6 +76,7 @@ const ( IdentityStoreEndpointID = "identitystore" Inspector2EndpointID = "inspector2" KMSEndpointID = "kms" + KafkaConnectEndpointID = "kafkaconnect" KendraEndpointID = "kendra" LambdaEndpointID = "lambda" LexV2ModelsEndpointID = "models-v2-lex" From 8240f27c885d4c1c18b6841b260602c2d2cb8027 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 10:52:23 -0400 Subject: [PATCH 07/16] r/aws_mskconnect_connector: Migrate to AWS SDK for Go v2. --- internal/service/kafkaconnect/connector.go | 474 ++++++++++-------- .../service/kafkaconnect/connector_test.go | 81 +-- internal/service/kafkaconnect/exports_test.go | 11 + internal/service/kafkaconnect/find.go | 25 - .../kafkaconnect/service_package_gen.go | 3 +- internal/service/kafkaconnect/status.go | 16 - internal/service/kafkaconnect/wait.go | 63 --- 7 files changed, 299 insertions(+), 374 deletions(-) create mode 100644 internal/service/kafkaconnect/exports_test.go diff --git a/internal/service/kafkaconnect/connector.go b/internal/service/kafkaconnect/connector.go index ea7837c2a8b..5fe6cb27200 100644 --- a/internal/service/kafkaconnect/connector.go +++ b/internal/service/kafkaconnect/connector.go @@ -5,16 +5,20 @@ package kafkaconnect import ( "context" + "fmt" "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -23,9 +27,9 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_mskconnect_connector") +// @SDKResource("aws_mskconnect_connector", name="Connector") // @Tags(identifierAttribute="arn") -func ResourceConnector() *schema.Resource { +func resourceConnector() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceConnectorCreate, ReadWithoutTimeout: resourceConnectorRead, @@ -202,11 +206,11 @@ func ResourceConnector() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "authentication_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: kafkaconnect.KafkaClusterClientAuthenticationTypeNone, - ValidateFunc: validation.StringInSlice(kafkaconnect.KafkaClusterClientAuthenticationType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.KafkaClusterClientAuthenticationTypeNone, + ValidateDiagFunc: enum.Validate[awstypes.KafkaClusterClientAuthenticationType](), }, }, }, @@ -219,11 +223,11 @@ func ResourceConnector() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "encryption_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: kafkaconnect.KafkaClusterEncryptionInTransitTypePlaintext, - ValidateFunc: validation.StringInSlice(kafkaconnect.KafkaClusterEncryptionInTransitType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.KafkaClusterEncryptionInTransitTypePlaintext, + ValidateDiagFunc: enum.Validate[awstypes.KafkaClusterEncryptionInTransitType](), }, }, }, @@ -395,13 +399,12 @@ func ResourceConnector() *schema.Resource { func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) name := d.Get(names.AttrName).(string) input := &kafkaconnect.CreateConnectorInput{ Capacity: expandCapacity(d.Get("capacity").([]interface{})[0].(map[string]interface{})), - ConnectorConfiguration: flex.ExpandStringMap(d.Get("connector_configuration").(map[string]interface{})), + ConnectorConfiguration: flex.ExpandStringValueMap(d.Get("connector_configuration").(map[string]interface{})), ConnectorName: aws.String(name), KafkaCluster: expandCluster(d.Get("kafka_cluster").([]interface{})[0].(map[string]interface{})), KafkaClusterClientAuthentication: expandClusterClientAuthentication(d.Get("kafka_cluster_client_authentication").([]interface{})[0].(map[string]interface{})), @@ -424,18 +427,15 @@ func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta i input.WorkerConfiguration = expandWorkerConfiguration(v.([]interface{})[0].(map[string]interface{})) } - log.Printf("[DEBUG] Creating MSK Connect Connector: %s", input) - output, err := conn.CreateConnectorWithContext(ctx, input) + output, err := conn.CreateConnector(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK Connect Connector (%s): %s", name, err) } - d.SetId(aws.StringValue(output.ConnectorArn)) - - _, err = waitConnectorCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) + d.SetId(aws.ToString(output.ConnectorArn)) - if err != nil { + if _, err := waitConnectorCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) create: %s", d.Id(), err) } @@ -444,10 +444,9 @@ func 
resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta i func resourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - connector, err := FindConnectorByARN(ctx, conn, d.Id()) + connector, err := findConnectorByARN(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] MSK Connect Connector (%s) not found, removing from state", d.Id()) @@ -467,7 +466,7 @@ func resourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta int } else { d.Set("capacity", nil) } - d.Set("connector_configuration", aws.StringValueMap(connector.ConnectorConfiguration)) + d.Set("connector_configuration", connector.ConnectorConfiguration) d.Set(names.AttrDescription, connector.ConnectorDescription) if connector.KafkaCluster != nil { if err := d.Set("kafka_cluster", []interface{}{flattenClusterDescription(connector.KafkaCluster)}); err != nil { @@ -517,8 +516,7 @@ func resourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta int func resourceConnectorUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &kafkaconnect.UpdateConnectorInput{ @@ -527,16 +525,13 @@ func resourceConnectorUpdate(ctx context.Context, d *schema.ResourceData, meta i CurrentVersion: aws.String(d.Get(names.AttrVersion).(string)), } - log.Printf("[DEBUG] Updating MSK Connect Connector: %s", input) - _, err := conn.UpdateConnectorWithContext(ctx, input) + _, err := conn.UpdateConnector(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Connect Connector (%s): %s", d.Id(), err) } - _, err = waitConnectorUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - - if err != nil { + if _, err := waitConnectorUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) update: %s", d.Id(), err) } } @@ -546,15 +541,14 @@ func resourceConnectorUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceConnectorDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) log.Printf("[DEBUG] Deleting MSK Connect Connector: %s", d.Id()) - _, err := conn.DeleteConnectorWithContext(ctx, &kafkaconnect.DeleteConnectorInput{ + _, err := conn.DeleteConnector(ctx, &kafkaconnect.DeleteConnectorInput{ ConnectorArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } @@ -562,21 +556,123 @@ func resourceConnectorDelete(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "deleting MSK Connect Connector (%s): %s", d.Id(), err) } - _, err = waitConnectorDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) - - if err != nil { + if _, err := waitConnectorDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) delete: %s", d.Id(), 
err) } return diags } -func expandCapacity(tfMap map[string]interface{}) *kafkaconnect.Capacity { +func findConnectorByARN(ctx context.Context, conn *kafkaconnect.Client, arn string) (*kafkaconnect.DescribeConnectorOutput, error) { + input := &kafkaconnect.DescribeConnectorInput{ + ConnectorArn: aws.String(arn), + } + + output, err := conn.DescribeConnector(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusConnector(ctx context.Context, conn *kafkaconnect.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findConnectorByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.ConnectorState), nil + } +} + +func waitConnectorCreated(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ConnectorStateCreating), + Target: enum.Slice(awstypes.ConnectorStateRunning), + Refresh: statusConnector(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { + if state, stateDescription := output.ConnectorState, output.StateDescription; state == awstypes.ConnectorStateFailed && stateDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateDescription.Code), aws.ToString(stateDescription.Message))) + } + + return output, err + } + + return nil, err +} + +func waitConnectorUpdated(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ConnectorStateUpdating), + Target: enum.Slice(awstypes.ConnectorStateRunning), + Refresh: statusConnector(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { + if state, stateDescription := output.ConnectorState, output.StateDescription; state == awstypes.ConnectorStateFailed && stateDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateDescription.Code), aws.ToString(stateDescription.Message))) + } + + return output, err + } + + return nil, err +} + +func waitConnectorDeleted(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ConnectorStateDeleting), + Target: []string{}, + Refresh: statusConnector(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { + if state, stateDescription := output.ConnectorState, output.StateDescription; state == awstypes.ConnectorStateFailed && stateDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateDescription.Code), aws.ToString(stateDescription.Message))) + } + + return output, err + } + + return nil, err +} + +func 
expandCapacity(tfMap map[string]interface{}) *awstypes.Capacity { if tfMap == nil { return nil } - apiObject := &kafkaconnect.Capacity{} + apiObject := &awstypes.Capacity{} if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 { apiObject.AutoScaling = expandAutoScaling(v[0].(map[string]interface{})) @@ -589,23 +685,23 @@ func expandCapacity(tfMap map[string]interface{}) *kafkaconnect.Capacity { return apiObject } -func expandAutoScaling(tfMap map[string]interface{}) *kafkaconnect.AutoScaling { +func expandAutoScaling(tfMap map[string]interface{}) *awstypes.AutoScaling { if tfMap == nil { return nil } - apiObject := &kafkaconnect.AutoScaling{} + apiObject := &awstypes.AutoScaling{} if v, ok := tfMap["max_worker_count"].(int); ok && v != 0 { - apiObject.MaxWorkerCount = aws.Int64(int64(v)) + apiObject.MaxWorkerCount = int32(v) } if v, ok := tfMap["mcu_count"].(int); ok && v != 0 { - apiObject.McuCount = aws.Int64(int64(v)) + apiObject.McuCount = int32(v) } if v, ok := tfMap["min_worker_count"].(int); ok && v != 0 { - apiObject.MinWorkerCount = aws.Int64(int64(v)) + apiObject.MinWorkerCount = int32(v) } if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 { @@ -619,58 +715,58 @@ func expandAutoScaling(tfMap map[string]interface{}) *kafkaconnect.AutoScaling { return apiObject } -func expandScaleInPolicy(tfMap map[string]interface{}) *kafkaconnect.ScaleInPolicy { +func expandScaleInPolicy(tfMap map[string]interface{}) *awstypes.ScaleInPolicy { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ScaleInPolicy{} + apiObject := &awstypes.ScaleInPolicy{} if v, ok := tfMap["cpu_utilization_percentage"].(int); ok && v != 0 { - apiObject.CpuUtilizationPercentage = aws.Int64(int64(v)) + apiObject.CpuUtilizationPercentage = int32(v) } return apiObject } -func expandScaleOutPolicy(tfMap map[string]interface{}) *kafkaconnect.ScaleOutPolicy { +func expandScaleOutPolicy(tfMap map[string]interface{}) *awstypes.ScaleOutPolicy { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ScaleOutPolicy{} + apiObject := &awstypes.ScaleOutPolicy{} if v, ok := tfMap["cpu_utilization_percentage"].(int); ok && v != 0 { - apiObject.CpuUtilizationPercentage = aws.Int64(int64(v)) + apiObject.CpuUtilizationPercentage = int32(v) } return apiObject } -func expandProvisionedCapacity(tfMap map[string]interface{}) *kafkaconnect.ProvisionedCapacity { +func expandProvisionedCapacity(tfMap map[string]interface{}) *awstypes.ProvisionedCapacity { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ProvisionedCapacity{} + apiObject := &awstypes.ProvisionedCapacity{} if v, ok := tfMap["mcu_count"].(int); ok && v != 0 { - apiObject.McuCount = aws.Int64(int64(v)) + apiObject.McuCount = int32(v) } if v, ok := tfMap["worker_count"].(int); ok && v != 0 { - apiObject.WorkerCount = aws.Int64(int64(v)) + apiObject.WorkerCount = int32(v) } return apiObject } -func expandCapacityUpdate(tfMap map[string]interface{}) *kafkaconnect.CapacityUpdate { +func expandCapacityUpdate(tfMap map[string]interface{}) *awstypes.CapacityUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.CapacityUpdate{} + apiObject := &awstypes.CapacityUpdate{} if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 { apiObject.AutoScaling = expandAutoScalingUpdate(v[0].(map[string]interface{})) @@ -683,23 +779,23 @@ func expandCapacityUpdate(tfMap map[string]interface{}) *kafkaconnect.CapacityUp return apiObject } -func expandAutoScalingUpdate(tfMap map[string]interface{}) 
*kafkaconnect.AutoScalingUpdate { +func expandAutoScalingUpdate(tfMap map[string]interface{}) *awstypes.AutoScalingUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.AutoScalingUpdate{} + apiObject := &awstypes.AutoScalingUpdate{} if v, ok := tfMap["max_worker_count"].(int); ok { - apiObject.MaxWorkerCount = aws.Int64(int64(v)) + apiObject.MaxWorkerCount = int32(v) } if v, ok := tfMap["mcu_count"].(int); ok { - apiObject.McuCount = aws.Int64(int64(v)) + apiObject.McuCount = int32(v) } if v, ok := tfMap["min_worker_count"].(int); ok { - apiObject.MinWorkerCount = aws.Int64(int64(v)) + apiObject.MinWorkerCount = int32(v) } if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 { @@ -713,58 +809,58 @@ func expandAutoScalingUpdate(tfMap map[string]interface{}) *kafkaconnect.AutoSca return apiObject } -func expandScaleInPolicyUpdate(tfMap map[string]interface{}) *kafkaconnect.ScaleInPolicyUpdate { +func expandScaleInPolicyUpdate(tfMap map[string]interface{}) *awstypes.ScaleInPolicyUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ScaleInPolicyUpdate{} + apiObject := &awstypes.ScaleInPolicyUpdate{} if v, ok := tfMap["cpu_utilization_percentage"].(int); ok { - apiObject.CpuUtilizationPercentage = aws.Int64(int64(v)) + apiObject.CpuUtilizationPercentage = int32(v) } return apiObject } -func expandScaleOutPolicyUpdate(tfMap map[string]interface{}) *kafkaconnect.ScaleOutPolicyUpdate { +func expandScaleOutPolicyUpdate(tfMap map[string]interface{}) *awstypes.ScaleOutPolicyUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ScaleOutPolicyUpdate{} + apiObject := &awstypes.ScaleOutPolicyUpdate{} if v, ok := tfMap["cpu_utilization_percentage"].(int); ok { - apiObject.CpuUtilizationPercentage = aws.Int64(int64(v)) + apiObject.CpuUtilizationPercentage = int32(v) } return apiObject } -func expandProvisionedCapacityUpdate(tfMap map[string]interface{}) *kafkaconnect.ProvisionedCapacityUpdate { +func expandProvisionedCapacityUpdate(tfMap map[string]interface{}) *awstypes.ProvisionedCapacityUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ProvisionedCapacityUpdate{} + apiObject := &awstypes.ProvisionedCapacityUpdate{} if v, ok := tfMap["mcu_count"].(int); ok { - apiObject.McuCount = aws.Int64(int64(v)) + apiObject.McuCount = int32(v) } if v, ok := tfMap["worker_count"].(int); ok { - apiObject.WorkerCount = aws.Int64(int64(v)) + apiObject.WorkerCount = int32(v) } return apiObject } -func expandCluster(tfMap map[string]interface{}) *kafkaconnect.KafkaCluster { +func expandCluster(tfMap map[string]interface{}) *awstypes.KafkaCluster { if tfMap == nil { return nil } - apiObject := &kafkaconnect.KafkaCluster{} + apiObject := &awstypes.KafkaCluster{} if v, ok := tfMap["apache_kafka_cluster"].([]interface{}); ok && len(v) > 0 { apiObject.ApacheKafkaCluster = expandApacheCluster(v[0].(map[string]interface{})) @@ -773,12 +869,12 @@ func expandCluster(tfMap map[string]interface{}) *kafkaconnect.KafkaCluster { return apiObject } -func expandApacheCluster(tfMap map[string]interface{}) *kafkaconnect.ApacheKafkaCluster { +func expandApacheCluster(tfMap map[string]interface{}) *awstypes.ApacheKafkaCluster { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ApacheKafkaCluster{} + apiObject := &awstypes.ApacheKafkaCluster{} if v, ok := tfMap["bootstrap_servers"].(string); ok && v != "" { apiObject.BootstrapServers = aws.String(v) @@ -791,58 +887,58 @@ func expandApacheCluster(tfMap map[string]interface{}) *kafkaconnect.ApacheKafka 
return apiObject } -func expandVPC(tfMap map[string]interface{}) *kafkaconnect.Vpc { +func expandVPC(tfMap map[string]interface{}) *awstypes.Vpc { if tfMap == nil { return nil } - apiObject := &kafkaconnect.Vpc{} + apiObject := &awstypes.Vpc{} if v, ok := tfMap[names.AttrSecurityGroups].(*schema.Set); ok && v.Len() > 0 { - apiObject.SecurityGroups = flex.ExpandStringSet(v) + apiObject.SecurityGroups = flex.ExpandStringValueSet(v) } if v, ok := tfMap[names.AttrSubnets].(*schema.Set); ok && v.Len() > 0 { - apiObject.Subnets = flex.ExpandStringSet(v) + apiObject.Subnets = flex.ExpandStringValueSet(v) } return apiObject } -func expandClusterClientAuthentication(tfMap map[string]interface{}) *kafkaconnect.KafkaClusterClientAuthentication { +func expandClusterClientAuthentication(tfMap map[string]interface{}) *awstypes.KafkaClusterClientAuthentication { if tfMap == nil { return nil } - apiObject := &kafkaconnect.KafkaClusterClientAuthentication{} + apiObject := &awstypes.KafkaClusterClientAuthentication{} if v, ok := tfMap["authentication_type"].(string); ok && v != "" { - apiObject.AuthenticationType = aws.String(v) + apiObject.AuthenticationType = awstypes.KafkaClusterClientAuthenticationType(v) } return apiObject } -func expandClusterEncryptionInTransit(tfMap map[string]interface{}) *kafkaconnect.KafkaClusterEncryptionInTransit { +func expandClusterEncryptionInTransit(tfMap map[string]interface{}) *awstypes.KafkaClusterEncryptionInTransit { if tfMap == nil { return nil } - apiObject := &kafkaconnect.KafkaClusterEncryptionInTransit{} + apiObject := &awstypes.KafkaClusterEncryptionInTransit{} if v, ok := tfMap["encryption_type"].(string); ok && v != "" { - apiObject.EncryptionType = aws.String(v) + apiObject.EncryptionType = awstypes.KafkaClusterEncryptionInTransitType(v) } return apiObject } -func expandPlugin(tfMap map[string]interface{}) *kafkaconnect.Plugin { +func expandPlugin(tfMap map[string]interface{}) *awstypes.Plugin { if tfMap == nil { return nil } - apiObject := &kafkaconnect.Plugin{} + apiObject := &awstypes.Plugin{} if v, ok := tfMap["custom_plugin"].([]interface{}); ok && len(v) > 0 { apiObject.CustomPlugin = expandCustomPlugin(v[0].(map[string]interface{})) @@ -851,16 +947,15 @@ func expandPlugin(tfMap map[string]interface{}) *kafkaconnect.Plugin { return apiObject } -func expandPlugins(tfList []interface{}) []*kafkaconnect.Plugin { +func expandPlugins(tfList []interface{}) []awstypes.Plugin { if len(tfList) == 0 { return nil } - var apiObjects []*kafkaconnect.Plugin + var apiObjects []awstypes.Plugin for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { continue } @@ -871,36 +966,36 @@ func expandPlugins(tfList []interface{}) []*kafkaconnect.Plugin { continue } - apiObjects = append(apiObjects, apiObject) + apiObjects = append(apiObjects, *apiObject) } return apiObjects } -func expandCustomPlugin(tfMap map[string]interface{}) *kafkaconnect.CustomPlugin { +func expandCustomPlugin(tfMap map[string]interface{}) *awstypes.CustomPlugin { if tfMap == nil { return nil } - apiObject := &kafkaconnect.CustomPlugin{} + apiObject := &awstypes.CustomPlugin{} if v, ok := tfMap[names.AttrARN].(string); ok && v != "" { apiObject.CustomPluginArn = aws.String(v) } if v, ok := tfMap["revision"].(int); ok && v != 0 { - apiObject.Revision = aws.Int64(int64(v)) + apiObject.Revision = int64(v) } return apiObject } -func expandLogDelivery(tfMap map[string]interface{}) *kafkaconnect.LogDelivery { +func expandLogDelivery(tfMap map[string]interface{}) 
*awstypes.LogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.LogDelivery{} + apiObject := &awstypes.LogDelivery{} if v, ok := tfMap["worker_log_delivery"].([]interface{}); ok && len(v) > 0 { apiObject.WorkerLogDelivery = expandWorkerLogDelivery(v[0].(map[string]interface{})) @@ -909,12 +1004,12 @@ func expandLogDelivery(tfMap map[string]interface{}) *kafkaconnect.LogDelivery { return apiObject } -func expandWorkerLogDelivery(tfMap map[string]interface{}) *kafkaconnect.WorkerLogDelivery { +func expandWorkerLogDelivery(tfMap map[string]interface{}) *awstypes.WorkerLogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.WorkerLogDelivery{} + apiObject := &awstypes.WorkerLogDelivery{} if v, ok := tfMap[names.AttrCloudWatchLogs].([]interface{}); ok && len(v) > 0 { apiObject.CloudWatchLogs = expandCloudWatchLogsLogDelivery(v[0].(map[string]interface{})) @@ -931,15 +1026,15 @@ func expandWorkerLogDelivery(tfMap map[string]interface{}) *kafkaconnect.WorkerL return apiObject } -func expandCloudWatchLogsLogDelivery(tfMap map[string]interface{}) *kafkaconnect.CloudWatchLogsLogDelivery { +func expandCloudWatchLogsLogDelivery(tfMap map[string]interface{}) *awstypes.CloudWatchLogsLogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.CloudWatchLogsLogDelivery{} + apiObject := &awstypes.CloudWatchLogsLogDelivery{} if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObject.Enabled = aws.Bool(v) + apiObject.Enabled = v } if v, ok := tfMap["log_group"].(string); ok && v != "" { @@ -949,37 +1044,37 @@ func expandCloudWatchLogsLogDelivery(tfMap map[string]interface{}) *kafkaconnect return apiObject } -func expandFirehoseLogDelivery(tfMap map[string]interface{}) *kafkaconnect.FirehoseLogDelivery { +func expandFirehoseLogDelivery(tfMap map[string]interface{}) *awstypes.FirehoseLogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.FirehoseLogDelivery{} + apiObject := &awstypes.FirehoseLogDelivery{} if v, ok := tfMap["delivery_stream"].(string); ok && v != "" { apiObject.DeliveryStream = aws.String(v) } if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObject.Enabled = aws.Bool(v) + apiObject.Enabled = v } return apiObject } -func expandS3LogDelivery(tfMap map[string]interface{}) *kafkaconnect.S3LogDelivery { +func expandS3LogDelivery(tfMap map[string]interface{}) *awstypes.S3LogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.S3LogDelivery{} + apiObject := &awstypes.S3LogDelivery{} if v, ok := tfMap[names.AttrBucket].(string); ok && v != "" { apiObject.Bucket = aws.String(v) } if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObject.Enabled = aws.Bool(v) + apiObject.Enabled = v } if v, ok := tfMap[names.AttrPrefix].(string); ok && v != "" { @@ -989,15 +1084,15 @@ func expandS3LogDelivery(tfMap map[string]interface{}) *kafkaconnect.S3LogDelive return apiObject } -func expandWorkerConfiguration(tfMap map[string]interface{}) *kafkaconnect.WorkerConfiguration { +func expandWorkerConfiguration(tfMap map[string]interface{}) *awstypes.WorkerConfiguration { if tfMap == nil { return nil } - apiObject := &kafkaconnect.WorkerConfiguration{} + apiObject := &awstypes.WorkerConfiguration{} if v, ok := tfMap["revision"].(int); ok && v != 0 { - apiObject.Revision = aws.Int64(int64(v)) + apiObject.Revision = int64(v) } if v, ok := tfMap[names.AttrARN].(string); ok && v != "" { @@ -1007,7 +1102,7 @@ func expandWorkerConfiguration(tfMap map[string]interface{}) *kafkaconnect.Worke return apiObject } -func 
flattenCapacityDescription(apiObject *kafkaconnect.CapacityDescription) map[string]interface{} { +func flattenCapacityDescription(apiObject *awstypes.CapacityDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1025,23 +1120,15 @@ func flattenCapacityDescription(apiObject *kafkaconnect.CapacityDescription) map return tfMap } -func flattenAutoScalingDescription(apiObject *kafkaconnect.AutoScalingDescription) map[string]interface{} { +func flattenAutoScalingDescription(apiObject *awstypes.AutoScalingDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.MaxWorkerCount; v != nil { - tfMap["max_worker_count"] = aws.Int64Value(v) - } - - if v := apiObject.McuCount; v != nil { - tfMap["mcu_count"] = aws.Int64Value(v) - } - - if v := apiObject.MinWorkerCount; v != nil { - tfMap["min_worker_count"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "max_worker_count": apiObject.MaxWorkerCount, + "mcu_count": apiObject.McuCount, + "min_worker_count": apiObject.MinWorkerCount, } if v := apiObject.ScaleInPolicy; v != nil { @@ -1055,53 +1142,44 @@ func flattenAutoScalingDescription(apiObject *kafkaconnect.AutoScalingDescriptio return tfMap } -func flattenScaleInPolicyDescription(apiObject *kafkaconnect.ScaleInPolicyDescription) map[string]interface{} { +func flattenScaleInPolicyDescription(apiObject *awstypes.ScaleInPolicyDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.CpuUtilizationPercentage; v != nil { - tfMap["cpu_utilization_percentage"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "cpu_utilization_percentage": apiObject.CpuUtilizationPercentage, } return tfMap } -func flattenScaleOutPolicyDescription(apiObject *kafkaconnect.ScaleOutPolicyDescription) map[string]interface{} { +func flattenScaleOutPolicyDescription(apiObject *awstypes.ScaleOutPolicyDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.CpuUtilizationPercentage; v != nil { - tfMap["cpu_utilization_percentage"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "cpu_utilization_percentage": apiObject.CpuUtilizationPercentage, } return tfMap } -func flattenProvisionedCapacityDescription(apiObject *kafkaconnect.ProvisionedCapacityDescription) map[string]interface{} { +func flattenProvisionedCapacityDescription(apiObject *awstypes.ProvisionedCapacityDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.McuCount; v != nil { - tfMap["mcu_count"] = aws.Int64Value(v) - } - - if v := apiObject.WorkerCount; v != nil { - tfMap["worker_count"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "mcu_count": apiObject.McuCount, + "worker_count": apiObject.WorkerCount, } return tfMap } -func flattenClusterDescription(apiObject *kafkaconnect.KafkaClusterDescription) map[string]interface{} { +func flattenClusterDescription(apiObject *awstypes.KafkaClusterDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1115,7 +1193,7 @@ func flattenClusterDescription(apiObject *kafkaconnect.KafkaClusterDescription) return tfMap } -func flattenApacheClusterDescription(apiObject *kafkaconnect.ApacheKafkaClusterDescription) map[string]interface{} { +func flattenApacheClusterDescription(apiObject *awstypes.ApacheKafkaClusterDescription) map[string]interface{} { if apiObject == nil { 
return nil } @@ -1123,7 +1201,7 @@ func flattenApacheClusterDescription(apiObject *kafkaconnect.ApacheKafkaClusterD tfMap := map[string]interface{}{} if v := apiObject.BootstrapServers; v != nil { - tfMap["bootstrap_servers"] = aws.StringValue(v) + tfMap["bootstrap_servers"] = aws.ToString(v) } if v := apiObject.Vpc; v != nil { @@ -1133,7 +1211,7 @@ func flattenApacheClusterDescription(apiObject *kafkaconnect.ApacheKafkaClusterD return tfMap } -func flattenVPCDescription(apiObject *kafkaconnect.VpcDescription) map[string]interface{} { +func flattenVPCDescription(apiObject *awstypes.VpcDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1141,45 +1219,41 @@ func flattenVPCDescription(apiObject *kafkaconnect.VpcDescription) map[string]in tfMap := map[string]interface{}{} if v := apiObject.SecurityGroups; v != nil { - tfMap[names.AttrSecurityGroups] = aws.StringValueSlice(v) + tfMap[names.AttrSecurityGroups] = v } if v := apiObject.Subnets; v != nil { - tfMap[names.AttrSubnets] = aws.StringValueSlice(v) + tfMap[names.AttrSubnets] = v } return tfMap } -func flattenClusterClientAuthenticationDescription(apiObject *kafkaconnect.KafkaClusterClientAuthenticationDescription) map[string]interface{} { +func flattenClusterClientAuthenticationDescription(apiObject *awstypes.KafkaClusterClientAuthenticationDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.AuthenticationType; v != nil { - tfMap["authentication_type"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "authentication_type": apiObject.AuthenticationType, } return tfMap } -func flattenClusterEncryptionInTransitDescription(apiObject *kafkaconnect.KafkaClusterEncryptionInTransitDescription) map[string]interface{} { +func flattenClusterEncryptionInTransitDescription(apiObject *awstypes.KafkaClusterEncryptionInTransitDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.EncryptionType; v != nil { - tfMap["encryption_type"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "encryption_type": apiObject.EncryptionType, } return tfMap } -func flattenPluginDescription(apiObject *kafkaconnect.PluginDescription) map[string]interface{} { +func flattenPluginDescription(apiObject *awstypes.PluginDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1193,7 +1267,7 @@ func flattenPluginDescription(apiObject *kafkaconnect.PluginDescription) map[str return tfMap } -func flattenPluginDescriptions(apiObjects []*kafkaconnect.PluginDescription) []interface{} { +func flattenPluginDescriptions(apiObjects []awstypes.PluginDescription) []interface{} { if len(apiObjects) == 0 { return nil } @@ -1201,35 +1275,29 @@ func flattenPluginDescriptions(apiObjects []*kafkaconnect.PluginDescription) []i var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - tfList = append(tfList, flattenPluginDescription(apiObject)) + tfList = append(tfList, flattenPluginDescription(&apiObject)) } return tfList } -func flattenCustomPluginDescription(apiObject *kafkaconnect.CustomPluginDescription) map[string]interface{} { +func flattenCustomPluginDescription(apiObject *awstypes.CustomPluginDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.CustomPluginArn; v != nil { - tfMap[names.AttrARN] = aws.StringValue(v) + tfMap := map[string]interface{}{ + 
"revision": apiObject.Revision, } - if v := apiObject.Revision; v != nil { - tfMap["revision"] = aws.Int64Value(v) + if v := apiObject.CustomPluginArn; v != nil { + tfMap[names.AttrARN] = aws.ToString(v) } return tfMap } -func flattenLogDeliveryDescription(apiObject *kafkaconnect.LogDeliveryDescription) map[string]interface{} { +func flattenLogDeliveryDescription(apiObject *awstypes.LogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1243,7 +1311,7 @@ func flattenLogDeliveryDescription(apiObject *kafkaconnect.LogDeliveryDescriptio return tfMap } -func flattenWorkerLogDeliveryDescription(apiObject *kafkaconnect.WorkerLogDeliveryDescription) map[string]interface{} { +func flattenWorkerLogDeliveryDescription(apiObject *awstypes.WorkerLogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1265,77 +1333,69 @@ func flattenWorkerLogDeliveryDescription(apiObject *kafkaconnect.WorkerLogDelive return tfMap } -func flattenCloudWatchLogsLogDeliveryDescription(apiObject *kafkaconnect.CloudWatchLogsLogDeliveryDescription) map[string]interface{} { +func flattenCloudWatchLogsLogDeliveryDescription(apiObject *awstypes.CloudWatchLogsLogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Enabled; v != nil { - tfMap[names.AttrEnabled] = aws.BoolValue(v) + tfMap := map[string]interface{}{ + names.AttrEnabled: apiObject.Enabled, } if v := apiObject.LogGroup; v != nil { - tfMap["log_group"] = aws.StringValue(v) + tfMap["log_group"] = aws.ToString(v) } return tfMap } -func flattenFirehoseLogDeliveryDescription(apiObject *kafkaconnect.FirehoseLogDeliveryDescription) map[string]interface{} { +func flattenFirehoseLogDeliveryDescription(apiObject *awstypes.FirehoseLogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.DeliveryStream; v != nil { - tfMap["delivery_stream"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + names.AttrEnabled: apiObject.Enabled, } - if v := apiObject.Enabled; v != nil { - tfMap[names.AttrEnabled] = aws.BoolValue(v) + if v := apiObject.DeliveryStream; v != nil { + tfMap["delivery_stream"] = aws.ToString(v) } return tfMap } -func flattenS3LogDeliveryDescription(apiObject *kafkaconnect.S3LogDeliveryDescription) map[string]interface{} { +func flattenS3LogDeliveryDescription(apiObject *awstypes.S3LogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Bucket; v != nil { - tfMap[names.AttrBucket] = aws.StringValue(v) + tfMap := map[string]interface{}{ + names.AttrEnabled: apiObject.Enabled, } - if v := apiObject.Enabled; v != nil { - tfMap[names.AttrEnabled] = aws.BoolValue(v) + if v := apiObject.Bucket; v != nil { + tfMap[names.AttrBucket] = aws.ToString(v) } if v := apiObject.Prefix; v != nil { - tfMap[names.AttrPrefix] = aws.StringValue(v) + tfMap[names.AttrPrefix] = aws.ToString(v) } return tfMap } -func flattenWorkerConfigurationDescription(apiObject *kafkaconnect.WorkerConfigurationDescription) map[string]interface{} { +func flattenWorkerConfigurationDescription(apiObject *awstypes.WorkerConfigurationDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Revision; v != nil { - tfMap["revision"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "revision": apiObject.Revision, } 
if v := apiObject.WorkerConfigurationArn; v != nil { - tfMap[names.AttrARN] = aws.StringValue(v) + tfMap[names.AttrARN] = aws.ToString(v) } return tfMap diff --git a/internal/service/kafkaconnect/connector_test.go b/internal/service/kafkaconnect/connector_test.go index 12f854c9769..90a0d3197f5 100644 --- a/internal/service/kafkaconnect/connector_test.go +++ b/internal/service/kafkaconnect/connector_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/kafkaconnect" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -25,7 +24,7 @@ func TestAccKafkaConnectConnector_basic(t *testing.T) { resourceName := "aws_mskconnect_connector.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckConnectorDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -89,7 +88,7 @@ func TestAccKafkaConnectConnector_disappears(t *testing.T) { resourceName := "aws_mskconnect_connector.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckConnectorDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -112,7 +111,7 @@ func TestAccKafkaConnectConnector_update(t *testing.T) { resourceName := "aws_mskconnect_connector.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckConnectorDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -237,7 +236,7 @@ func TestAccKafkaConnectConnector_tags(t *testing.T) { resourceName := "aws_mskconnect_connector.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckConnectorDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -283,11 +282,7 @@ func testAccCheckConnectorExists(ctx context.Context, n string) resource.TestChe return fmt.Errorf("not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No MSK Connect Connector ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) _, err := tfkafkaconnect.FindConnectorByARN(ctx, conn, rs.Primary.ID) @@ -297,7 +292,7 @@ func testAccCheckConnectorExists(ctx context.Context, n string) resource.TestChe 
func testAccCheckConnectorDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_mskconnect_connector" { @@ -321,46 +316,8 @@ func testAccCheckConnectorDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccConnectorBaseConfig(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "10.10.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - vpc_id = aws_vpc.test.id - cidr_block = "10.10.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test2" { - vpc_id = aws_vpc.test.id - cidr_block = "10.10.2.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test3" { - vpc_id = aws_vpc.test.id - cidr_block = "10.10.3.0/24" - availability_zone = data.aws_availability_zones.available.names[2] - - tags = { - Name = %[1]q - } -} - +func testAccConnectorConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 3), fmt.Sprintf(` resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id name = %[1]q @@ -447,7 +404,7 @@ resource "aws_msk_cluster" "test" { number_of_broker_nodes = 3 broker_node_group_info { - client_subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + client_subnets = aws_subnet.test[*].id instance_type = "kafka.m5.large" security_groups = [aws_security_group.test.id] @@ -464,7 +421,7 @@ resource "aws_msk_cluster" "test" { func testAccConnectorConfig_basic(rName string) string { return acctest.ConfigCompose( testAccCustomPluginConfig_basic(rName), - testAccConnectorBaseConfig(rName), + testAccConnectorConfig_base(rName), fmt.Sprintf(` resource "aws_mskconnect_connector" "test" { name = %[1]q @@ -490,7 +447,7 @@ resource "aws_mskconnect_connector" "test" { vpc { security_groups = [aws_security_group.test.id] - subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + subnets = aws_subnet.test[*].id } } } @@ -525,7 +482,7 @@ func testAccConnectorConfig_allAttributes(rName string) string { return acctest.ConfigCompose( testAccCustomPluginConfig_basic(rName), testAccWorkerConfigurationConfig_basic(rName), - testAccConnectorBaseConfig(rName), + testAccConnectorConfig_base(rName), fmt.Sprintf(` resource "aws_cloudwatch_log_group" "test" { name = %[1]q @@ -564,7 +521,7 @@ resource "aws_mskconnect_connector" "test" { vpc { security_groups = [aws_security_group.test.id] - subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + subnets = aws_subnet.test[*].id } } } @@ -617,7 +574,7 @@ func testAccConnectorConfig_allAttributesCapacityUpdated(rName string) string { return acctest.ConfigCompose( testAccCustomPluginConfig_basic(rName), testAccWorkerConfigurationConfig_basic(rName), - testAccConnectorBaseConfig(rName), + testAccConnectorConfig_base(rName), fmt.Sprintf(` resource "aws_cloudwatch_log_group" "test" { name = %[1]q @@ -646,7 +603,7 @@ resource "aws_mskconnect_connector" "test" { vpc { security_groups = [aws_security_group.test.id] - subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + subnets = 
aws_subnet.test[*].id } } } @@ -698,7 +655,7 @@ resource "aws_mskconnect_connector" "test" { func testAccConnectorConfig_tags1(rName, tagKey1, tagValue1 string) string { return acctest.ConfigCompose( testAccCustomPluginConfig_basic(rName), - testAccConnectorBaseConfig(rName), + testAccConnectorConfig_base(rName), fmt.Sprintf(` resource "aws_mskconnect_connector" "test" { name = %[1]q @@ -724,7 +681,7 @@ resource "aws_mskconnect_connector" "test" { vpc { security_groups = [aws_security_group.test.id] - subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + subnets = aws_subnet.test[*].id } } } @@ -758,7 +715,7 @@ resource "aws_mskconnect_connector" "test" { func testAccConnectorConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return acctest.ConfigCompose( testAccCustomPluginConfig_basic(rName), - testAccConnectorBaseConfig(rName), + testAccConnectorConfig_base(rName), fmt.Sprintf(` resource "aws_mskconnect_connector" "test" { name = %[1]q @@ -784,7 +741,7 @@ resource "aws_mskconnect_connector" "test" { vpc { security_groups = [aws_security_group.test.id] - subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + subnets = aws_subnet.test[*].id } } } diff --git a/internal/service/kafkaconnect/exports_test.go b/internal/service/kafkaconnect/exports_test.go new file mode 100644 index 00000000000..1fa6f1ef329 --- /dev/null +++ b/internal/service/kafkaconnect/exports_test.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kafkaconnect + +// Exports for use in tests only. +var ( + ResourceConnector = resourceConnector + + FindConnectorByARN = findConnectorByARN +) diff --git a/internal/service/kafkaconnect/find.go b/internal/service/kafkaconnect/find.go index 52cab9333a0..9571d70702f 100644 --- a/internal/service/kafkaconnect/find.go +++ b/internal/service/kafkaconnect/find.go @@ -13,31 +13,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func FindConnectorByARN(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) (*kafkaconnect.DescribeConnectorOutput, error) { - input := &kafkaconnect.DescribeConnectorInput{ - ConnectorArn: aws.String(arn), - } - - output, err := conn.DescribeConnectorWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - func FindCustomPluginByARN(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) (*kafkaconnect.DescribeCustomPluginOutput, error) { input := &kafkaconnect.DescribeCustomPluginInput{ CustomPluginArn: aws.String(arn), diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index fc9f8a03373..5f8577e31ca 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -51,8 +51,9 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceConnector, + Factory: resourceConnector, TypeName: "aws_mskconnect_connector", + Name: "Connector", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: 
names.AttrARN, }, diff --git a/internal/service/kafkaconnect/status.go b/internal/service/kafkaconnect/status.go index ecd768f427f..7cf710e3b67 100644 --- a/internal/service/kafkaconnect/status.go +++ b/internal/service/kafkaconnect/status.go @@ -12,22 +12,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func statusConnectorState(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindConnectorByARN(ctx, conn, arn) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.ConnectorState), nil - } -} - func statusCustomPluginState(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindCustomPluginByARN(ctx, conn, arn) diff --git a/internal/service/kafkaconnect/wait.go b/internal/service/kafkaconnect/wait.go index 4dca5c5366b..63bcea4b924 100644 --- a/internal/service/kafkaconnect/wait.go +++ b/internal/service/kafkaconnect/wait.go @@ -14,69 +14,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func waitConnectorCreated(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.ConnectorStateCreating}, - Target: []string{kafkaconnect.ConnectorStateRunning}, - Refresh: statusConnectorState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { - if state, stateDescription := aws.StringValue(output.ConnectorState), output.StateDescription; state == kafkaconnect.ConnectorStateFailed && stateDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateDescription.Code), aws.StringValue(stateDescription.Message))) - } - - return output, err - } - - return nil, err -} - -func waitConnectorDeleted(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.ConnectorStateDeleting}, - Target: []string{}, - Refresh: statusConnectorState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { - if state, stateDescription := aws.StringValue(output.ConnectorState), output.StateDescription; state == kafkaconnect.ConnectorStateFailed && stateDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateDescription.Code), aws.StringValue(stateDescription.Message))) - } - - return output, err - } - - return nil, err -} - -func waitConnectorUpdated(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.ConnectorStateUpdating}, - Target: []string{kafkaconnect.ConnectorStateRunning}, - Refresh: statusConnectorState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { - if state, 
stateDescription := aws.StringValue(output.ConnectorState), output.StateDescription; state == kafkaconnect.ConnectorStateFailed && stateDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateDescription.Code), aws.StringValue(stateDescription.Message))) - } - - return output, err - } - - return nil, err -} - func waitCustomPluginCreated(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{kafkaconnect.CustomPluginStateCreating}, From abe1de8fe35e65db41dc88d60be28ee8635a9a60 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 11:29:19 -0400 Subject: [PATCH 08/16] d/aws_mskconnect_connector: Migrate to AWS SDK for Go v2. --- .../kafkaconnect/connector_data_source.go | 81 +++++++++++-------- .../connector_data_source_test.go | 1 - .../kafkaconnect/service_package_gen.go | 3 +- 3 files changed, 49 insertions(+), 36 deletions(-) diff --git a/internal/service/kafkaconnect/connector_data_source.go b/internal/service/kafkaconnect/connector_data_source.go index 2ba8a5820a6..0e42ee45d7f 100644 --- a/internal/service/kafkaconnect/connector_data_source.go +++ b/internal/service/kafkaconnect/connector_data_source.go @@ -6,20 +6,22 @@ package kafkaconnect import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_mskconnect_connector") +// @SDKDataSource("aws_mskconnect_connector", name="Connector") // @Tags(identifierAttribute="arn") -func DataSourceConnector() *schema.Resource { +func dataSourceConnector() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceConnectorRead, @@ -47,48 +49,59 @@ func DataSourceConnector() *schema.Resource { func dataSourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + connector, err := findConnectorByName(ctx, conn, d.Get(names.AttrName).(string)) - name := d.Get(names.AttrName) - var output []*kafkaconnect.ConnectorSummary + if err != nil { + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Connect Connector", err)) + } - err := conn.ListConnectorsPagesWithContext(ctx, &kafkaconnect.ListConnectorsInput{}, func(page *kafkaconnect.ListConnectorsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + arn := aws.ToString(connector.ConnectorArn) + d.SetId(arn) + d.Set(names.AttrARN, arn) + d.Set(names.AttrDescription, connector.ConnectorDescription) + d.Set(names.AttrName, connector.ConnectorName) + d.Set(names.AttrVersion, connector.CurrentVersion) - for _, v := range page.Connectors { - if 
aws.StringValue(v.ConnectorName) == name { - output = append(output, v) - } - } + return diags +} - return !lastPage - }) +func findConnector(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListConnectorsInput, filter tfslices.Predicate[*awstypes.ConnectorSummary]) (*awstypes.ConnectorSummary, error) { + output, err := findConnectors(ctx, conn, input, filter) if err != nil { - return sdkdiag.AppendErrorf(diags, "listing MSK Connect Connectors: %s", err) + return nil, err } - if len(output) == 0 || output[0] == nil { - err = tfresource.NewEmptyResultError(name) - } else if count := len(output); count > 1 { - err = tfresource.NewTooManyResultsError(count, name) - } + return tfresource.AssertSingleValueResult(output) +} - if err != nil { - return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Connect Connector", err)) - } +func findConnectors(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListConnectorsInput, filter tfslices.Predicate[*awstypes.ConnectorSummary]) ([]awstypes.ConnectorSummary, error) { + var output []awstypes.ConnectorSummary - connector := output[0] + pages := kafkaconnect.NewListConnectorsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - d.SetId(aws.StringValue(connector.ConnectorArn)) + if err != nil { + return nil, err + } - d.Set(names.AttrARN, connector.ConnectorArn) - d.Set(names.AttrDescription, connector.ConnectorDescription) - d.Set(names.AttrName, connector.ConnectorName) - d.Set(names.AttrVersion, connector.CurrentVersion) + for _, v := range page.Connectors { + if filter(&v) { + output = append(output, v) + } + } + } - return diags + return output, nil +} + +func findConnectorByName(ctx context.Context, conn *kafkaconnect.Client, name string) (*awstypes.ConnectorSummary, error) { + input := &kafkaconnect.ListConnectorsInput{} + + return findConnector(ctx, conn, input, func(v *awstypes.ConnectorSummary) bool { + return aws.ToString(v.ConnectorName) == name + }) } diff --git a/internal/service/kafkaconnect/connector_data_source_test.go b/internal/service/kafkaconnect/connector_data_source_test.go index 036628be5b6..7cbe376cdcf 100644 --- a/internal/service/kafkaconnect/connector_data_source_test.go +++ b/internal/service/kafkaconnect/connector_data_source_test.go @@ -22,7 +22,6 @@ func TestAccKafkaConnectConnectorDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index 5f8577e31ca..617233992e1 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -25,8 +25,9 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceConnector, + Factory: dataSourceConnector, TypeName: "aws_mskconnect_connector", + Name: "Connector", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }, From 5e6b8ea84c89e466f2dee3f5b76189bfaedcc30a Mon Sep 17 00:00:00 2001 From: Kit 
Ewbank Date: Mon, 8 Jul 2024 11:43:03 -0400 Subject: [PATCH 09/16] r/aws_mskconnect_custom_plugin: Migrate to AWS SDK for Go v2. --- .../service/kafkaconnect/custom_plugin.go | 152 +++++++++++++----- .../kafkaconnect/custom_plugin_test.go | 19 +-- internal/service/kafkaconnect/exports_test.go | 6 +- internal/service/kafkaconnect/find.go | 25 --- .../kafkaconnect/service_package_gen.go | 3 +- internal/service/kafkaconnect/status.go | 16 -- internal/service/kafkaconnect/wait.go | 41 ----- 7 files changed, 126 insertions(+), 136 deletions(-) diff --git a/internal/service/kafkaconnect/custom_plugin.go b/internal/service/kafkaconnect/custom_plugin.go index e2b0884d748..f86c357c382 100644 --- a/internal/service/kafkaconnect/custom_plugin.go +++ b/internal/service/kafkaconnect/custom_plugin.go @@ -5,16 +5,19 @@ package kafkaconnect import ( "context" + "fmt" "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -22,9 +25,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_mskconnect_custom_plugin") +// @SDKResource("aws_mskconnect_custom_plugin", name="Custom Plugin") // @Tags(identifierAttribute="arn") -func ResourceCustomPlugin() *schema.Resource { +func resourceCustomPlugin() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCustomPluginCreate, ReadWithoutTimeout: resourceCustomPluginRead, @@ -46,10 +49,10 @@ func ResourceCustomPlugin() *schema.Resource { Computed: true, }, names.AttrContentType: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(kafkaconnect.CustomPluginContentType_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.CustomPluginContentType](), }, names.AttrDescription: { Type: schema.TypeString, @@ -115,12 +118,11 @@ func ResourceCustomPlugin() *schema.Resource { func resourceCustomPluginCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) name := d.Get(names.AttrName).(string) input := &kafkaconnect.CreateCustomPluginInput{ - ContentType: aws.String(d.Get(names.AttrContentType).(string)), + ContentType: awstypes.CustomPluginContentType(d.Get(names.AttrContentType).(string)), Location: expandCustomPluginLocation(d.Get(names.AttrLocation).([]interface{})[0].(map[string]interface{})), Name: aws.String(name), Tags: getTagsIn(ctx), @@ -130,18 +132,15 @@ func resourceCustomPluginCreate(ctx context.Context, d *schema.ResourceData, met 
input.Description = aws.String(v.(string)) } - log.Printf("[DEBUG] Creating MSK Connect Custom Plugin: %s", input) - output, err := conn.CreateCustomPluginWithContext(ctx, input) + output, err := conn.CreateCustomPlugin(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK Connect Custom Plugin (%s): %s", name, err) } - d.SetId(aws.StringValue(output.CustomPluginArn)) - - _, err = waitCustomPluginCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) + d.SetId(aws.ToString(output.CustomPluginArn)) - if err != nil { + if _, err := waitCustomPluginCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Custom Plugin (%s) create: %s", d.Id(), err) } @@ -150,10 +149,9 @@ func resourceCustomPluginCreate(ctx context.Context, d *schema.ResourceData, met func resourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - plugin, err := FindCustomPluginByARN(ctx, conn, d.Id()) + plugin, err := findCustomPluginByARN(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] MSK Connect Custom Plugin (%s) not found, removing from state", d.Id()) @@ -192,22 +190,21 @@ func resourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, meta func resourceCustomPluginUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - // This update function is for updating tags only - there is no update action for this resource + // This update function is for updating tags only - there is no update action for this resource. return append(diags, resourceCustomPluginRead(ctx, d, meta)...) 
} func resourceCustomPluginDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) log.Printf("[DEBUG] Deleting MSK Connect Custom Plugin: %s", d.Id()) - _, err := conn.DeleteCustomPluginWithContext(ctx, &kafkaconnect.DeleteCustomPluginInput{ + _, err := conn.DeleteCustomPlugin(ctx, &kafkaconnect.DeleteCustomPluginInput{ CustomPluginArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } @@ -215,21 +212,98 @@ func resourceCustomPluginDelete(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "deleting MSK Connect Custom Plugin (%s): %s", d.Id(), err) } - _, err = waitCustomPluginDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) - - if err != nil { + if _, err := waitCustomPluginDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Custom Plugin (%s) delete: %s", d.Id(), err) } return diags } -func expandCustomPluginLocation(tfMap map[string]interface{}) *kafkaconnect.CustomPluginLocation { +func findCustomPluginByARN(ctx context.Context, conn *kafkaconnect.Client, arn string) (*kafkaconnect.DescribeCustomPluginOutput, error) { + input := &kafkaconnect.DescribeCustomPluginInput{ + CustomPluginArn: aws.String(arn), + } + + output, err := conn.DescribeCustomPlugin(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusCustomPlugin(ctx context.Context, conn *kafkaconnect.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findCustomPluginByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.CustomPluginState), nil + } +} + +func waitCustomPluginCreated(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.CustomPluginStateCreating), + Target: enum.Slice(awstypes.CustomPluginStateActive), + Refresh: statusCustomPlugin(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeCustomPluginOutput); ok { + if state, stateDescription := output.CustomPluginState, output.StateDescription; state == awstypes.CustomPluginStateCreateFailed && stateDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateDescription.Code), aws.ToString(stateDescription.Message))) + } + + return output, err + } + + return nil, err +} + +func waitCustomPluginDeleted(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.CustomPluginStateDeleting), + Target: []string{}, + Refresh: statusCustomPlugin(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := 
stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeCustomPluginOutput); ok { + return output, err + } + + return nil, err +} + +func expandCustomPluginLocation(tfMap map[string]interface{}) *awstypes.CustomPluginLocation { if tfMap == nil { return nil } - apiObject := &kafkaconnect.CustomPluginLocation{} + apiObject := &awstypes.CustomPluginLocation{} if v, ok := tfMap["s3"].([]interface{}); ok && len(v) > 0 { apiObject.S3Location = expandS3Location(v[0].(map[string]interface{})) @@ -238,12 +312,12 @@ func expandCustomPluginLocation(tfMap map[string]interface{}) *kafkaconnect.Cust return apiObject } -func expandS3Location(tfMap map[string]interface{}) *kafkaconnect.S3Location { +func expandS3Location(tfMap map[string]interface{}) *awstypes.S3Location { if tfMap == nil { return nil } - apiObject := &kafkaconnect.S3Location{} + apiObject := &awstypes.S3Location{} if v, ok := tfMap["bucket_arn"].(string); ok && v != "" { apiObject.BucketArn = aws.String(v) @@ -260,7 +334,7 @@ func expandS3Location(tfMap map[string]interface{}) *kafkaconnect.S3Location { return apiObject } -func flattenCustomPluginLocationDescription(apiObject *kafkaconnect.CustomPluginLocationDescription) map[string]interface{} { +func flattenCustomPluginLocationDescription(apiObject *awstypes.CustomPluginLocationDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -274,7 +348,7 @@ func flattenCustomPluginLocationDescription(apiObject *kafkaconnect.CustomPlugin return tfMap } -func flattenS3LocationDescription(apiObject *kafkaconnect.S3LocationDescription) map[string]interface{} { +func flattenS3LocationDescription(apiObject *awstypes.S3LocationDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -282,15 +356,15 @@ func flattenS3LocationDescription(apiObject *kafkaconnect.S3LocationDescription) tfMap := map[string]interface{}{} if v := apiObject.BucketArn; v != nil { - tfMap["bucket_arn"] = aws.StringValue(v) + tfMap["bucket_arn"] = aws.ToString(v) } if v := apiObject.FileKey; v != nil { - tfMap["file_key"] = aws.StringValue(v) + tfMap["file_key"] = aws.ToString(v) } if v := apiObject.ObjectVersion; v != nil { - tfMap["object_version"] = aws.StringValue(v) + tfMap["object_version"] = aws.ToString(v) } return tfMap diff --git a/internal/service/kafkaconnect/custom_plugin_test.go b/internal/service/kafkaconnect/custom_plugin_test.go index 3e8ab798303..dc54884b6b6 100644 --- a/internal/service/kafkaconnect/custom_plugin_test.go +++ b/internal/service/kafkaconnect/custom_plugin_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/kafkaconnect" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -25,7 +24,7 @@ func TestAccKafkaConnectCustomPlugin_basic(t *testing.T) { resourceName := "aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -62,7 +61,7 @@ func TestAccKafkaConnectCustomPlugin_disappears(t *testing.T) { resourceName := 
"aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -85,7 +84,7 @@ func TestAccKafkaConnectCustomPlugin_description(t *testing.T) { resourceName := "aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -112,7 +111,7 @@ func TestAccKafkaConnectCustomPlugin_tags(t *testing.T) { resourceName := "aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -157,7 +156,7 @@ func TestAccKafkaConnectCustomPlugin_objectVersion(t *testing.T) { resourceName := "aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -185,11 +184,7 @@ func testAccCheckCustomPluginExists(ctx context.Context, name string) resource.T return fmt.Errorf("Not found: %s", name) } - if rs.Primary.ID == "" { - return fmt.Errorf("No MSK Connect Custom Plugin ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) _, err := tfkafkaconnect.FindCustomPluginByARN(ctx, conn, rs.Primary.ID) @@ -199,7 +194,7 @@ func testAccCheckCustomPluginExists(ctx context.Context, name string) resource.T func testAccCheckCustomPluginDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_mskconnect_custom_plugin" { diff --git a/internal/service/kafkaconnect/exports_test.go b/internal/service/kafkaconnect/exports_test.go index 1fa6f1ef329..4dd85e17891 100644 --- a/internal/service/kafkaconnect/exports_test.go +++ b/internal/service/kafkaconnect/exports_test.go @@ -5,7 +5,9 @@ package kafkaconnect // Exports for use in tests only. 
var ( - ResourceConnector = resourceConnector + ResourceConnector = resourceConnector + ResourceCustomPlugin = resourceCustomPlugin - FindConnectorByARN = findConnectorByARN + FindConnectorByARN = findConnectorByARN + FindCustomPluginByARN = findCustomPluginByARN ) diff --git a/internal/service/kafkaconnect/find.go b/internal/service/kafkaconnect/find.go index 9571d70702f..e8b36840cdf 100644 --- a/internal/service/kafkaconnect/find.go +++ b/internal/service/kafkaconnect/find.go @@ -13,31 +13,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func FindCustomPluginByARN(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) (*kafkaconnect.DescribeCustomPluginOutput, error) { - input := &kafkaconnect.DescribeCustomPluginInput{ - CustomPluginArn: aws.String(arn), - } - - output, err := conn.DescribeCustomPluginWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - func FindWorkerConfigurationByARN(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { input := &kafkaconnect.DescribeWorkerConfigurationInput{ WorkerConfigurationArn: aws.String(arn), diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index 617233992e1..00506ff4684 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -60,8 +60,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceCustomPlugin, + Factory: resourceCustomPlugin, TypeName: "aws_mskconnect_custom_plugin", + Name: "Custom Plugin", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }, diff --git a/internal/service/kafkaconnect/status.go b/internal/service/kafkaconnect/status.go index 7cf710e3b67..fa684bea48d 100644 --- a/internal/service/kafkaconnect/status.go +++ b/internal/service/kafkaconnect/status.go @@ -12,22 +12,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func statusCustomPluginState(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindCustomPluginByARN(ctx, conn, arn) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.CustomPluginState), nil - } -} - func statusWorkerConfigurationState(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindWorkerConfigurationByARN(ctx, conn, arn) diff --git a/internal/service/kafkaconnect/wait.go b/internal/service/kafkaconnect/wait.go index 63bcea4b924..33a89ba747a 100644 --- a/internal/service/kafkaconnect/wait.go +++ b/internal/service/kafkaconnect/wait.go @@ -5,53 +5,12 @@ package kafkaconnect import ( "context" - "fmt" "time" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kafkaconnect" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func waitCustomPluginCreated(ctx 
context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.CustomPluginStateCreating}, - Target: []string{kafkaconnect.CustomPluginStateActive}, - Refresh: statusCustomPluginState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeCustomPluginOutput); ok { - if state, stateDescription := aws.StringValue(output.CustomPluginState), output.StateDescription; state == kafkaconnect.CustomPluginStateCreateFailed && stateDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateDescription.Code), aws.StringValue(stateDescription.Message))) - } - - return output, err - } - - return nil, err -} - -func waitCustomPluginDeleted(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.CustomPluginStateDeleting}, - Target: []string{}, - Refresh: statusCustomPluginState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeCustomPluginOutput); ok { - return output, err - } - - return nil, err -} - func waitWorkerConfigurationDeleted(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{kafkaconnect.WorkerConfigurationStateDeleting}, From f0aed3e66074f45832275affbdcd540d2f77643e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 11:52:36 -0400 Subject: [PATCH 10/16] d/aws_mskconnect_custom_plugin: Migrate to AWS SDK for Go v2. 
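As with the connector data source in the previous commit, the v1
ListCustomPluginsPagesWithContext callback is replaced by the SDK v2
paginator plus a name predicate. A minimal, self-contained sketch of the
paginate-and-filter shape (the listCustomPluginsByName helper and the
"example" plugin name below are illustrative, not part of the SDK or of
this change):

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/kafkaconnect"
        awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types"
    )

    // listCustomPluginsByName pages through ListCustomPlugins and keeps
    // only the summaries whose name matches, mirroring the shape of the
    // findCustomPlugins/findCustomPluginByName helpers in this commit.
    func listCustomPluginsByName(ctx context.Context, conn *kafkaconnect.Client, name string) ([]awstypes.CustomPluginSummary, error) {
        var matches []awstypes.CustomPluginSummary

        pages := kafkaconnect.NewListCustomPluginsPaginator(conn, &kafkaconnect.ListCustomPluginsInput{})
        for pages.HasMorePages() {
            page, err := pages.NextPage(ctx)
            if err != nil {
                return nil, err
            }
            for _, v := range page.CustomPlugins {
                if aws.ToString(v.Name) == name {
                    matches = append(matches, v)
                }
            }
        }

        return matches, nil
    }

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }

        plugins, err := listCustomPluginsByName(ctx, kafkaconnect.NewFromConfig(cfg), "example")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("found %d matching plugin(s)\n", len(plugins))
    }

The paginator returns errors from NextPage inline, replacing the
callback-with-lastPage-bool control flow of the v1 Pages API.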
--- .../kafkaconnect/custom_plugin_data_source.go | 87 +++++++++++-------- .../custom_plugin_data_source_test.go | 1 - .../kafkaconnect/service_package_gen.go | 3 +- 3 files changed, 52 insertions(+), 39 deletions(-) diff --git a/internal/service/kafkaconnect/custom_plugin_data_source.go b/internal/service/kafkaconnect/custom_plugin_data_source.go index f4dd6909226..ccde1fd7734 100644 --- a/internal/service/kafkaconnect/custom_plugin_data_source.go +++ b/internal/service/kafkaconnect/custom_plugin_data_source.go @@ -6,20 +6,22 @@ package kafkaconnect import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_mskconnect_custom_plugin") +// @SDKDataSource("aws_mskconnect_custom_plugin", name="Custom Plugin") // @Tags(identifierAttribute="arn") -func DataSourceCustomPlugin() *schema.Resource { +func dataSourceCustomPlugin() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceCustomPluginRead, @@ -51,45 +53,17 @@ func DataSourceCustomPlugin() *schema.Resource { func dataSourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - name := d.Get(names.AttrName) - var output []*kafkaconnect.CustomPluginSummary - - err := conn.ListCustomPluginsPagesWithContext(ctx, &kafkaconnect.ListCustomPluginsInput{}, func(page *kafkaconnect.ListCustomPluginsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.CustomPlugins { - if aws.StringValue(v.Name) == name { - output = append(output, v) - } - } - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing MSK Connect Custom Plugins: %s", err) - } - - if len(output) == 0 || output[0] == nil { - err = tfresource.NewEmptyResultError(name) - } else if count := len(output); count > 1 { - err = tfresource.NewTooManyResultsError(count, name) - } + plugin, err := findCustomPluginByName(ctx, conn, d.Get(names.AttrName).(string)) if err != nil { return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Connect Custom Plugin", err)) } - plugin := output[0] - - d.SetId(aws.StringValue(plugin.CustomPluginArn)) - - d.Set(names.AttrARN, plugin.CustomPluginArn) + arn := aws.ToString(plugin.CustomPluginArn) + d.SetId(arn) + d.Set(names.AttrARN, arn) d.Set(names.AttrDescription, plugin.Description) d.Set(names.AttrName, plugin.Name) d.Set(names.AttrState, plugin.CustomPluginState) @@ -102,3 +76,42 @@ func dataSourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, met return diags } + +func findCustomPlugin(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListCustomPluginsInput, filter 
tfslices.Predicate[*awstypes.CustomPluginSummary]) (*awstypes.CustomPluginSummary, error) { + output, err := findCustomPlugins(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findCustomPlugins(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListCustomPluginsInput, filter tfslices.Predicate[*awstypes.CustomPluginSummary]) ([]awstypes.CustomPluginSummary, error) { + var output []awstypes.CustomPluginSummary + + pages := kafkaconnect.NewListCustomPluginsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.CustomPlugins { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func findCustomPluginByName(ctx context.Context, conn *kafkaconnect.Client, name string) (*awstypes.CustomPluginSummary, error) { + input := &kafkaconnect.ListCustomPluginsInput{} + + return findCustomPlugin(ctx, conn, input, func(v *awstypes.CustomPluginSummary) bool { + return aws.ToString(v.Name) == name + }) +} diff --git a/internal/service/kafkaconnect/custom_plugin_data_source_test.go b/internal/service/kafkaconnect/custom_plugin_data_source_test.go index 872c735aa5f..99e2112b156 100644 --- a/internal/service/kafkaconnect/custom_plugin_data_source_test.go +++ b/internal/service/kafkaconnect/custom_plugin_data_source_test.go @@ -23,7 +23,6 @@ func TestAccKafkaConnectCustomPluginDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index 00506ff4684..cb0cfb3433d 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -33,8 +33,9 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac }, }, { - Factory: DataSourceCustomPlugin, + Factory: dataSourceCustomPlugin, TypeName: "aws_mskconnect_custom_plugin", + Name: "Custom Plugin", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }, From f9660702b13e00769d91173ef8b9f5b7102ba8e6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 12:04:49 -0400 Subject: [PATCH 11/16] r/aws_mskconnect_worker_configuration: Migrate to AWS SDK for Go v2. 
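Besides relocating the find/status/wait helpers into
worker_configuration.go, this swaps v1's string-code error matching
(tfawserr.ErrCodeEquals with kafkaconnect.ErrCodeNotFoundException) for
typed exceptions via errs.IsA[*awstypes.NotFoundException], an
errors.As wrapper. A minimal sketch of the equivalent stdlib check,
assuming default credential/region resolution (the ARN is a
placeholder):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/config"
        "github.com/aws/aws-sdk-go-v2/service/kafkaconnect"
        awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types"
    )

    func main() {
        ctx := context.Background()
        cfg, err := config.LoadDefaultConfig(ctx)
        if err != nil {
            log.Fatal(err)
        }
        conn := kafkaconnect.NewFromConfig(cfg)

        // Placeholder ARN; any deleted or never-created worker
        // configuration exercises the not-found path.
        arn := "arn:aws:kafkaconnect:us-east-1:123456789012:worker-configuration/example/11111111-1111-1111-1111-111111111111-1"

        _, err = conn.DescribeWorkerConfiguration(ctx, &kafkaconnect.DescribeWorkerConfigurationInput{
            WorkerConfigurationArn: aws.String(arn),
        })

        // SDK v2 models service errors as concrete types, so the
        // not-found case is matched with errors.As instead of
        // comparing error-code strings as in v1.
        var nfe *awstypes.NotFoundException
        if errors.As(err, &nfe) {
            fmt.Println("not found, treated as already deleted:", nfe.ErrorMessage())
            return
        }
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("worker configuration still exists")
    }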
--- internal/service/kafkaconnect/exports_test.go | 10 +- internal/service/kafkaconnect/find.go | 39 ------ .../kafkaconnect/service_package_gen.go | 3 +- internal/service/kafkaconnect/status.go | 29 ----- internal/service/kafkaconnect/wait.go | 29 ----- .../kafkaconnect/worker_configuration.go | 117 +++++++++++++----- .../kafkaconnect/worker_configuration_test.go | 17 +-- 7 files changed, 100 insertions(+), 144 deletions(-) delete mode 100644 internal/service/kafkaconnect/find.go delete mode 100644 internal/service/kafkaconnect/status.go delete mode 100644 internal/service/kafkaconnect/wait.go diff --git a/internal/service/kafkaconnect/exports_test.go b/internal/service/kafkaconnect/exports_test.go index 4dd85e17891..7b4d8ed191d 100644 --- a/internal/service/kafkaconnect/exports_test.go +++ b/internal/service/kafkaconnect/exports_test.go @@ -5,9 +5,11 @@ package kafkaconnect // Exports for use in tests only. var ( - ResourceConnector = resourceConnector - ResourceCustomPlugin = resourceCustomPlugin + ResourceConnector = resourceConnector + ResourceCustomPlugin = resourceCustomPlugin + ResourceWorkerConfiguration = resourceWorkerConfiguration - FindConnectorByARN = findConnectorByARN - FindCustomPluginByARN = findCustomPluginByARN + FindConnectorByARN = findConnectorByARN + FindCustomPluginByARN = findCustomPluginByARN + FindWorkerConfigurationByARN = findWorkerConfigurationByARN ) diff --git a/internal/service/kafkaconnect/find.go b/internal/service/kafkaconnect/find.go deleted file mode 100644 index e8b36840cdf..00000000000 --- a/internal/service/kafkaconnect/find.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package kafkaconnect - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindWorkerConfigurationByARN(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { - input := &kafkaconnect.DescribeWorkerConfigurationInput{ - WorkerConfigurationArn: aws.String(arn), - } - - output, err := conn.DescribeWorkerConfigurationWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index cb0cfb3433d..5ad06945bf3 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -69,8 +69,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceWorkerConfiguration, + Factory: resourceWorkerConfiguration, TypeName: "aws_mskconnect_worker_configuration", + Name: "Worker Configuration", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }, diff --git a/internal/service/kafkaconnect/status.go b/internal/service/kafkaconnect/status.go deleted file mode 100644 index fa684bea48d..00000000000 --- a/internal/service/kafkaconnect/status.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 
HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package kafkaconnect - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func statusWorkerConfigurationState(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindWorkerConfigurationByARN(ctx, conn, arn) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.WorkerConfigurationState), nil - } -} diff --git a/internal/service/kafkaconnect/wait.go b/internal/service/kafkaconnect/wait.go deleted file mode 100644 index 33a89ba747a..00000000000 --- a/internal/service/kafkaconnect/wait.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package kafkaconnect - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -func waitWorkerConfigurationDeleted(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.WorkerConfigurationStateDeleting}, - Target: []string{}, - Refresh: statusWorkerConfigurationState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeWorkerConfigurationOutput); ok { - return output, err - } - - return nil, err -} diff --git a/internal/service/kafkaconnect/worker_configuration.go b/internal/service/kafkaconnect/worker_configuration.go index 7f76fee9b8a..c7c0c25ddeb 100644 --- a/internal/service/kafkaconnect/worker_configuration.go +++ b/internal/service/kafkaconnect/worker_configuration.go @@ -8,12 +8,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -23,9 +26,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_mskconnect_worker_configuration") +// @SDKResource("aws_mskconnect_worker_configuration", name="Worker Configuration") // @Tags(identifierAttribute="arn") -func ResourceWorkerConfiguration() *schema.Resource { +func resourceWorkerConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceWorkerConfigurationCreate, ReadWithoutTimeout: resourceWorkerConfigurationRead, @@ -82,8 +85,7 @@ func ResourceWorkerConfiguration() 
*schema.Resource { func resourceWorkerConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) name := d.Get(names.AttrName).(string) input := &kafkaconnect.CreateWorkerConfigurationInput{ @@ -96,24 +98,22 @@ func resourceWorkerConfigurationCreate(ctx context.Context, d *schema.ResourceDa input.Description = aws.String(v.(string)) } - log.Printf("[DEBUG] Creating MSK Connect Worker Configuration: %s", input) - output, err := conn.CreateWorkerConfigurationWithContext(ctx, input) + output, err := conn.CreateWorkerConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK Connect Worker Configuration (%s): %s", name, err) } - d.SetId(aws.StringValue(output.WorkerConfigurationArn)) + d.SetId(aws.ToString(output.WorkerConfigurationArn)) return append(diags, resourceWorkerConfigurationRead(ctx, d, meta)...) } func resourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - config, err := FindWorkerConfigurationByARN(ctx, conn, d.Id()) + config, err := findWorkerConfigurationByARN(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] MSK Connect Worker Configuration (%s) not found, removing from state", d.Id()) @@ -131,7 +131,7 @@ func resourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData if config.LatestRevision != nil { d.Set("latest_revision", config.LatestRevision.Revision) - d.Set("properties_file_content", decodePropertiesFileContent(aws.StringValue(config.LatestRevision.PropertiesFileContent))) + d.Set("properties_file_content", decodePropertiesFileContent(aws.ToString(config.LatestRevision.PropertiesFileContent))) } else { d.Set("latest_revision", nil) d.Set("properties_file_content", nil) @@ -143,31 +143,21 @@ func resourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData func resourceWorkerConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - // This update function is for updating tags only - there is no update action for this resource + // This update function is for updating tags only - there is no update action for this resource. return append(diags, resourceWorkerConfigurationRead(ctx, d, meta)...) 
} -func decodePropertiesFileContent(content string) string { - v, err := itypes.Base64Decode(content) - if err != nil { - return content - } - - return string(v) -} - func resourceWorkerConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) log.Printf("[DEBUG] Deleting MSK Connect Worker Configuration: %s", d.Id()) - _, err := conn.DeleteWorkerConfigurationWithContext(ctx, &kafkaconnect.DeleteWorkerConfigurationInput{ + _, err := conn.DeleteWorkerConfiguration(ctx, &kafkaconnect.DeleteWorkerConfigurationInput{ WorkerConfigurationArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } @@ -175,11 +165,76 @@ func resourceWorkerConfigurationDelete(ctx context.Context, d *schema.ResourceDa return sdkdiag.AppendErrorf(diags, "deleting MSK Connect Worker Configuration (%s): %s", d.Id(), err) } - _, err = waitWorkerConfigurationDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) - - if err != nil { + if _, err := waitWorkerConfigurationDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Worker Configuration (%s) delete: %s", d.Id(), err) } return diags } + +func findWorkerConfigurationByARN(ctx context.Context, conn *kafkaconnect.Client, arn string) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { + input := &kafkaconnect.DescribeWorkerConfigurationInput{ + WorkerConfigurationArn: aws.String(arn), + } + + output, err := conn.DescribeWorkerConfiguration(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusWorkerConfiguration(ctx context.Context, conn *kafkaconnect.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findWorkerConfigurationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.WorkerConfigurationState), nil + } +} + +func waitWorkerConfigurationDeleted(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.WorkerConfigurationStateDeleting), + Target: []string{}, + Refresh: statusWorkerConfiguration(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeWorkerConfigurationOutput); ok { + return output, err + } + + return nil, err +} + +func decodePropertiesFileContent(content string) string { + v, err := itypes.Base64Decode(content) + if err != nil { + return content + } + + return string(v) +} diff --git a/internal/service/kafkaconnect/worker_configuration_test.go b/internal/service/kafkaconnect/worker_configuration_test.go index 939b05eb9e0..4681029e31c 100644 --- a/internal/service/kafkaconnect/worker_configuration_test.go +++ b/internal/service/kafkaconnect/worker_configuration_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - 
"github.com/aws/aws-sdk-go/service/kafkaconnect" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -25,7 +24,7 @@ func TestAccKafkaConnectWorkerConfiguration_basic(t *testing.T) { resourceName := "aws_mskconnect_worker_configuration.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -55,7 +54,7 @@ func TestAccKafkaConnectWorkerConfiguration_disappears(t *testing.T) { resourceName := "aws_mskconnect_worker_configuration.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -78,7 +77,7 @@ func TestAccKafkaConnectWorkerConfiguration_description(t *testing.T) { resourceName := "aws_mskconnect_worker_configuration.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -105,7 +104,7 @@ func TestAccKafkaConnectWorkerConfiguration_tags(t *testing.T) { resourceName := "aws_mskconnect_worker_configuration.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -151,11 +150,7 @@ func testAccCheckWorkerConfigurationExists(ctx context.Context, n string) resour return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No MSK Connect Worker Configuration ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) _, err := tfkafkaconnect.FindWorkerConfigurationByARN(ctx, conn, rs.Primary.ID) @@ -165,7 +160,7 @@ func testAccCheckWorkerConfigurationExists(ctx context.Context, n string) resour func testAccCheckWorkerConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) for _, rs := range 
s.RootModule().Resources { if rs.Type != "aws_mskconnect_worker_configuration" { From 6e2b0bb42c9966c99d3498f60f0b04a5155d4253 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 12:10:34 -0400 Subject: [PATCH 12/16] d/aws_mskconnect_worker_configuration: Migrate to AWS SDK for Go v2. --- .../kafkaconnect/service_package_gen.go | 3 +- .../worker_configuration_data_source.go | 91 +++++++++++-------- .../worker_configuration_data_source_test.go | 1 - 3 files changed, 55 insertions(+), 40 deletions(-) diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index 5ad06945bf3..5a4f586bc5b 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -41,8 +41,9 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac }, }, { - Factory: DataSourceWorkerConfiguration, + Factory: dataSourceWorkerConfiguration, TypeName: "aws_mskconnect_worker_configuration", + Name: "Worker Configuration", Tags: &types.ServicePackageResourceTags{ IdentifierAttribute: names.AttrARN, }, diff --git a/internal/service/kafkaconnect/worker_configuration_data_source.go b/internal/service/kafkaconnect/worker_configuration_data_source.go index 73c4d073e69..0086f510966 100644 --- a/internal/service/kafkaconnect/worker_configuration_data_source.go +++ b/internal/service/kafkaconnect/worker_configuration_data_source.go @@ -6,20 +6,22 @@ package kafkaconnect import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_mskconnect_worker_configuration") +// @SDKDataSource("aws_mskconnect_worker_configuration", name="Worker Configuration") // @Tags(identifierAttribute="arn") -func DataSourceWorkerConfiguration() *schema.Resource { +func dataSourceWorkerConfiguration() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceWorkerConfigurationRead, @@ -51,56 +53,30 @@ func DataSourceWorkerConfiguration() *schema.Resource { func dataSourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - name := d.Get(names.AttrName) - var output []*kafkaconnect.WorkerConfigurationSummary - - err := conn.ListWorkerConfigurationsPagesWithContext(ctx, &kafkaconnect.ListWorkerConfigurationsInput{}, func(page *kafkaconnect.ListWorkerConfigurationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.WorkerConfigurations { - if aws.StringValue(v.Name) == name { - output = append(output, v) - } - } - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing MSK Connect Worker 
Configurations: %s", err) - } - - if len(output) == 0 || output[0] == nil { - err = tfresource.NewEmptyResultError(name) - } else if count := len(output); count > 1 { - err = tfresource.NewTooManyResultsError(count, name) - } + output, err := findWorkerConfigurationByName(ctx, conn, d.Get(names.AttrName).(string)) if err != nil { return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Connect Worker Configuration", err)) } - arn := aws.StringValue(output[0].WorkerConfigurationArn) - config, err := FindWorkerConfigurationByARN(ctx, conn, arn) + arn := aws.ToString(output.WorkerConfigurationArn) + config, err := findWorkerConfigurationByARN(ctx, conn, arn) if err != nil { return sdkdiag.AppendErrorf(diags, "reading MSK Connect Worker Configuration (%s): %s", arn, err) } - d.SetId(aws.StringValue(config.Name)) - + name := aws.ToString(config.Name) + d.SetId(name) d.Set(names.AttrARN, config.WorkerConfigurationArn) d.Set(names.AttrDescription, config.Description) - d.Set(names.AttrName, config.Name) + d.Set(names.AttrName, name) if config.LatestRevision != nil { d.Set("latest_revision", config.LatestRevision.Revision) - d.Set("properties_file_content", decodePropertiesFileContent(aws.StringValue(config.LatestRevision.PropertiesFileContent))) + d.Set("properties_file_content", decodePropertiesFileContent(aws.ToString(config.LatestRevision.PropertiesFileContent))) } else { d.Set("latest_revision", nil) d.Set("properties_file_content", nil) @@ -108,3 +84,42 @@ func dataSourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceDa return diags } + +func findWorkerConfiguration(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListWorkerConfigurationsInput, filter tfslices.Predicate[*awstypes.WorkerConfigurationSummary]) (*awstypes.WorkerConfigurationSummary, error) { + output, err := findWorkerConfigurations(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findWorkerConfigurations(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListWorkerConfigurationsInput, filter tfslices.Predicate[*awstypes.WorkerConfigurationSummary]) ([]awstypes.WorkerConfigurationSummary, error) { + var output []awstypes.WorkerConfigurationSummary + + pages := kafkaconnect.NewListWorkerConfigurationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.WorkerConfigurations { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func findWorkerConfigurationByName(ctx context.Context, conn *kafkaconnect.Client, name string) (*awstypes.WorkerConfigurationSummary, error) { + input := &kafkaconnect.ListWorkerConfigurationsInput{} + + return findWorkerConfiguration(ctx, conn, input, func(v *awstypes.WorkerConfigurationSummary) bool { + return aws.ToString(v.Name) == name + }) +} diff --git a/internal/service/kafkaconnect/worker_configuration_data_source_test.go b/internal/service/kafkaconnect/worker_configuration_data_source_test.go index 9392249f2d7..d7308c38de6 100644 --- a/internal/service/kafkaconnect/worker_configuration_data_source_test.go +++ b/internal/service/kafkaconnect/worker_configuration_data_source_test.go @@ -23,7 +23,6 @@ func TestAccKafkaConnectWorkerConfigurationDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, 
kafkaconnect.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { From e1a3f943a182addf4875dec62f6fba6b8570511a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 12:13:47 -0400 Subject: [PATCH 13/16] kafkaconnect: Migrate sweepers to AWS SDK for Go v2. --- internal/service/kafkaconnect/sweep.go | 68 ++++++++++++-------------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git a/internal/service/kafkaconnect/sweep.go b/internal/service/kafkaconnect/sweep.go index 37214bf32cc..2b11ab3dd19 100644 --- a/internal/service/kafkaconnect/sweep.go +++ b/internal/service/kafkaconnect/sweep.go @@ -7,11 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -35,33 +35,30 @@ func sweepConnectors(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.KafkaConnectConn(ctx) + conn := client.KafkaConnectClient(ctx) input := &kafkaconnect.ListConnectorsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListConnectorsPagesWithContext(ctx, input, func(page *kafkaconnect.ListConnectorsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := kafkaconnect.NewListConnectorsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping MSK Connect Connector sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing MSK Connect Connectors (%s): %w", region, err) } for _, v := range page.Connectors { - r := ResourceConnector() + r := resourceConnector() d := r.Data(nil) - d.SetId(aws.StringValue(v.ConnectorArn)) + d.SetId(aws.ToString(v.ConnectorArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping MSK Connect Connector sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing MSK Connect Connectors (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -79,33 +76,30 @@ func sweepCustomPlugins(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.KafkaConnectConn(ctx) + conn := client.KafkaConnectClient(ctx) input := &kafkaconnect.ListCustomPluginsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListCustomPluginsPagesWithContext(ctx, input, func(page *kafkaconnect.ListCustomPluginsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := kafkaconnect.NewListCustomPluginsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping MSK Connect Custom Plugin sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing MSK Connect Custom Plugins (%s): %w", region, err) } for _, v := range 
page.CustomPlugins { - r := ResourceCustomPlugin() + r := resourceCustomPlugin() d := r.Data(nil) - d.SetId(aws.StringValue(v.CustomPluginArn)) + d.SetId(aws.ToString(v.CustomPluginArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping MSK Connect Custom Plugin sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing MSK Connect Custom Plugins (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) From 7d59fa65b7f63ee7fc1b1835dbfda044ccfd7410 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 12:19:31 -0400 Subject: [PATCH 14/16] Fix semgrep 'ci.semgrep.migrate.aws-api-context'. --- .ci/semgrep/migrate/context.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/semgrep/migrate/context.yml b/.ci/semgrep/migrate/context.yml index 571b98a0343..8bdc830bc96 100644 --- a/.ci/semgrep/migrate/context.yml +++ b/.ci/semgrep/migrate/context.yml @@ -30,6 +30,7 @@ rules: - pattern-not: conn.Options() - pattern-not: codestarconnections_sdkv2.$API() - pattern-not: connectcases_sdkv2.$API() + - pattern-not: kafkaconnect_sdkv2.$API() - pattern-not: mediaconnect_sdkv2.$API() - pattern-not: pcaconnectorad_sdkv2.$API() severity: ERROR From b2783025fd008a41dde20ff55c90bdf08d646ce1 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 15:55:54 -0400 Subject: [PATCH 15/16] r/aws_mskconnect_worker_configuration: Add sweeper. --- internal/service/kafkaconnect/sweep.go | 49 ++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/internal/service/kafkaconnect/sweep.go b/internal/service/kafkaconnect/sweep.go index 2b11ab3dd19..8c957e6e321 100644 --- a/internal/service/kafkaconnect/sweep.go +++ b/internal/service/kafkaconnect/sweep.go @@ -27,6 +27,14 @@ func RegisterSweepers() { "aws_mskconnect_connector", }, }) + + resource.AddTestSweepers("aws_mskconnect_worker_configuration", &resource.Sweeper{ + Name: "aws_mskconnect_worker_configuration", + F: sweepWorkerConfigurations, + Dependencies: []string{ + "aws_mskconnect_connector", + }, + }) } func sweepConnectors(region string) error { @@ -110,3 +118,44 @@ func sweepCustomPlugins(region string) error { return nil } + +func sweepWorkerConfigurations(region string) error { + ctx := sweep.Context(region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.KafkaConnectClient(ctx) + input := &kafkaconnect.ListWorkerConfigurationsInput{} + sweepResources := make([]sweep.Sweepable, 0) + + pages := kafkaconnect.NewListWorkerConfigurationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping MSK Connect Worker Configuration sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing MSK Connect Worker Configurations (%s): %w", region, err) + } + + for _, v := range page.WorkerConfigurations { + r := resourceWorkerConfiguration() + d := r.Data(nil) + d.SetId(aws.ToString(v.WorkerConfigurationArn)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping MSK Connect Worker Configurations (%s): %w", region, err) + } + + return nil +} From 
e649c0c524076e80bcfe81a6af29689d82b7887d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 8 Jul 2024 16:47:47 -0400 Subject: [PATCH 16/16] r/aws_mskconnect_connector: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panic when `log_delivery.worker_log_delivery` is empty (`{}`). --- .changelog/38270.txt | 4 +++ internal/service/kafkaconnect/connector.go | 30 +++++++++++----------- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/.changelog/38270.txt b/.changelog/38270.txt index e00bc9d89a7..37dd0768010 100644 --- a/.changelog/38270.txt +++ b/.changelog/38270.txt @@ -25,3 +25,7 @@ data-source/aws_mskconnect_custom_plugin: Add `tags` attribute ```release-note:enhancement data-source/aws_mskconnect_worker_configuration: Add `tags` attribute ``` + +```release-note:bug +resource/aws_mskconnect_connector: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panic when `log_delivery.worker_log_delivery` is empty (`{}`) +``` \ No newline at end of file diff --git a/internal/service/kafkaconnect/connector.go b/internal/service/kafkaconnect/connector.go index 5fe6cb27200..50657436bc5 100644 --- a/internal/service/kafkaconnect/connector.go +++ b/internal/service/kafkaconnect/connector.go @@ -674,11 +674,11 @@ func expandCapacity(tfMap map[string]interface{}) *awstypes.Capacity { apiObject := &awstypes.Capacity{} - if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.AutoScaling = expandAutoScaling(v[0].(map[string]interface{})) } - if v, ok := tfMap["provisioned_capacity"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["provisioned_capacity"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ProvisionedCapacity = expandProvisionedCapacity(v[0].(map[string]interface{})) } @@ -704,11 +704,11 @@ func expandAutoScaling(tfMap map[string]interface{}) *awstypes.AutoScaling { apiObject.MinWorkerCount = int32(v) } - if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ScaleInPolicy = expandScaleInPolicy(v[0].(map[string]interface{})) } - if v, ok := tfMap["scale_out_policy"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["scale_out_policy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ScaleOutPolicy = expandScaleOutPolicy(v[0].(map[string]interface{})) } @@ -768,11 +768,11 @@ func expandCapacityUpdate(tfMap map[string]interface{}) *awstypes.CapacityUpdate apiObject := &awstypes.CapacityUpdate{} - if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.AutoScaling = expandAutoScalingUpdate(v[0].(map[string]interface{})) } - if v, ok := tfMap["provisioned_capacity"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["provisioned_capacity"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ProvisionedCapacity = expandProvisionedCapacityUpdate(v[0].(map[string]interface{})) } @@ -798,11 +798,11 @@ func expandAutoScalingUpdate(tfMap map[string]interface{}) *awstypes.AutoScaling apiObject.MinWorkerCount = int32(v) } - if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ScaleInPolicy = 
expandScaleInPolicyUpdate(v[0].(map[string]interface{})) } - if v, ok := tfMap["scale_out_policy"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["scale_out_policy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ScaleOutPolicy = expandScaleOutPolicyUpdate(v[0].(map[string]interface{})) } @@ -862,7 +862,7 @@ func expandCluster(tfMap map[string]interface{}) *awstypes.KafkaCluster { apiObject := &awstypes.KafkaCluster{} - if v, ok := tfMap["apache_kafka_cluster"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["apache_kafka_cluster"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ApacheKafkaCluster = expandApacheCluster(v[0].(map[string]interface{})) } @@ -880,7 +880,7 @@ func expandApacheCluster(tfMap map[string]interface{}) *awstypes.ApacheKafkaClus apiObject.BootstrapServers = aws.String(v) } - if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Vpc = expandVPC(v[0].(map[string]interface{})) } @@ -940,7 +940,7 @@ func expandPlugin(tfMap map[string]interface{}) *awstypes.Plugin { apiObject := &awstypes.Plugin{} - if v, ok := tfMap["custom_plugin"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["custom_plugin"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.CustomPlugin = expandCustomPlugin(v[0].(map[string]interface{})) } @@ -997,7 +997,7 @@ func expandLogDelivery(tfMap map[string]interface{}) *awstypes.LogDelivery { apiObject := &awstypes.LogDelivery{} - if v, ok := tfMap["worker_log_delivery"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["worker_log_delivery"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.WorkerLogDelivery = expandWorkerLogDelivery(v[0].(map[string]interface{})) } @@ -1011,15 +1011,15 @@ func expandWorkerLogDelivery(tfMap map[string]interface{}) *awstypes.WorkerLogDe apiObject := &awstypes.WorkerLogDelivery{} - if v, ok := tfMap[names.AttrCloudWatchLogs].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap[names.AttrCloudWatchLogs].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.CloudWatchLogs = expandCloudWatchLogsLogDelivery(v[0].(map[string]interface{})) } - if v, ok := tfMap["firehose"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["firehose"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Firehose = expandFirehoseLogDelivery(v[0].(map[string]interface{})) } - if v, ok := tfMap["s3"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["s3"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.S3 = expandS3LogDelivery(v[0].(map[string]interface{})) }
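
Note for reviewers: the `&& v[0] != nil` guards added throughout the expanders above address the failure mode named in the commit subject. When a nested block is declared empty (for example `worker_log_delivery {}`), Terraform hands the expander a one-element list whose single element is nil, and an unguarded `v[0].(map[string]interface{})` assertion panics. A minimal, self-contained sketch of the failure and the guard; `expandExample` is an illustrative stand-in, not provider code:

```go
package main

import "fmt"

// expandExample mimics the shape of the provider's expand helpers: it reads a
// nested block from the flattened Terraform representation, a []interface{}
// whose elements are maps.
func expandExample(tfMap map[string]interface{}) string {
	// Without the v[0] != nil check, an empty nested block arrives as
	// []interface{}{nil}, and the type assertion below would panic with
	// "interface conversion: interface {} is nil, not map[string]interface {}".
	if v, ok := tfMap["worker_log_delivery"].([]interface{}); ok && len(v) > 0 && v[0] != nil {
		inner := v[0].(map[string]interface{}) // safe: v[0] is known non-nil here
		return fmt.Sprintf("configured: %v", inner)
	}
	return "not configured"
}

func main() {
	// Empty block (`{}`): guarded path returns cleanly instead of panicking.
	fmt.Println(expandExample(map[string]interface{}{"worker_log_delivery": []interface{}{nil}}))
	// Populated block: the assertion runs as before.
	fmt.Println(expandExample(map[string]interface{}{"worker_log_delivery": []interface{}{map[string]interface{}{"enabled": true}}}))
}
```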
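The SDK v2 migrations in patches 12, 13, and 15 all swap the v1 `*PagesWithContext` callback style for the v2 paginator loop, which returns errors from `NextPage` directly instead of threading them out of a closure. A trimmed sketch of the loop shape, assuming only the standard aws-sdk-go-v2 paginator API; `listWorkerConfigurationsByName` is a hypothetical helper, not the provider's exported surface:

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kafkaconnect"
	awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types"
)

// listWorkerConfigurationsByName collects every worker configuration summary
// whose Name matches, paging through results with the SDK v2 paginator.
func listWorkerConfigurationsByName(ctx context.Context, conn *kafkaconnect.Client, name string) ([]awstypes.WorkerConfigurationSummary, error) {
	var out []awstypes.WorkerConfigurationSummary

	pages := kafkaconnect.NewListWorkerConfigurationsPaginator(conn, &kafkaconnect.ListWorkerConfigurationsInput{})
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			// Errors surface here inline; the sweepers additionally route this
			// through awsv2.SkipSweepError before treating it as fatal.
			return nil, err
		}

		for _, v := range page.WorkerConfigurations {
			if aws.ToString(v.Name) == name {
				out = append(out, v)
			}
		}
	}

	return out, nil
}
```

The same structure appears in `findWorkerConfigurations` (with a `tfslices.Predicate` filter) and in all three sweepers, which is what lets the `awsv1.SkipSweepError` import drop out in favor of `awsv2`.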