diff --git a/.changelog/32484.txt b/.changelog/32484.txt new file mode 100644 index 00000000000..d6453bb6d9f --- /dev/null +++ b/.changelog/32484.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_msk_bootstrap_brokers +``` \ No newline at end of file diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index d69c7428105..e3f86aa9839 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -179,7 +179,6 @@ import ( iotanalytics_sdkv1 "github.com/aws/aws-sdk-go/service/iotanalytics" iotevents_sdkv1 "github.com/aws/aws-sdk-go/service/iotevents" ivs_sdkv1 "github.com/aws/aws-sdk-go/service/ivs" - kafka_sdkv1 "github.com/aws/aws-sdk-go/service/kafka" kafkaconnect_sdkv1 "github.com/aws/aws-sdk-go/service/kafkaconnect" kinesisanalytics_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisanalytics" kinesisanalyticsv2_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisanalyticsv2" @@ -721,10 +720,6 @@ func (c *AWSClient) KMSConn(ctx context.Context) *kms_sdkv1.KMS { return errs.Must(conn[*kms_sdkv1.KMS](ctx, c, names.KMS, make(map[string]any))) } -func (c *AWSClient) KafkaConn(ctx context.Context) *kafka_sdkv1.Kafka { - return errs.Must(conn[*kafka_sdkv1.Kafka](ctx, c, names.Kafka, make(map[string]any))) -} - func (c *AWSClient) KafkaClient(ctx context.Context) *kafka_sdkv2.Client { return errs.Must(client[*kafka_sdkv2.Client](ctx, c, names.Kafka, make(map[string]any))) } diff --git a/internal/service/kafka/bootstrap_brokers_data_source.go b/internal/service/kafka/bootstrap_brokers_data_source.go new file mode 100644 index 00000000000..959c86a55e2 --- /dev/null +++ b/internal/service/kafka/bootstrap_brokers_data_source.go @@ -0,0 +1,96 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kafka + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +// @SDKDataSource("aws_msk_bootstrap_brokers", name="Bootstrap Brokers") +func dataSourceBootstrapBrokers() *schema.Resource { + return &schema.Resource{ + ReadWithoutTimeout: dataSourceBootstrapBrokersRead, + + Schema: map[string]*schema.Schema{ + "bootstrap_brokers": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_public_sasl_iam": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_public_sasl_scram": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_public_tls": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_sasl_iam": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_sasl_scram": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_tls": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_vpc_connectivity_sasl_iam": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_vpc_connectivity_sasl_scram": { + Type: schema.TypeString, + Computed: true, + }, + "bootstrap_brokers_vpc_connectivity_tls": { + Type: schema.TypeString, + Computed: true, + }, + "cluster_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + }, + } +} + +func dataSourceBootstrapBrokersRead(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaClient(ctx) + + clusterARN := d.Get("cluster_arn").(string) + output, err := findBootstrapBrokersByARN(ctx, conn, clusterARN) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading MSK Cluster (%s) bootstrap brokers: %s", clusterARN, err) + } + + d.SetId(clusterARN) + d.Set("bootstrap_brokers", SortEndpointsString(aws.ToString(output.BootstrapBrokerString))) + d.Set("bootstrap_brokers_public_sasl_iam", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringPublicSaslIam))) + d.Set("bootstrap_brokers_public_sasl_scram", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringPublicSaslScram))) + d.Set("bootstrap_brokers_public_tls", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringPublicTls))) + d.Set("bootstrap_brokers_sasl_iam", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringSaslIam))) + d.Set("bootstrap_brokers_sasl_scram", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringSaslScram))) + d.Set("bootstrap_brokers_tls", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringTls))) + d.Set("bootstrap_brokers_vpc_connectivity_sasl_iam", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringVpcConnectivitySaslIam))) + d.Set("bootstrap_brokers_vpc_connectivity_sasl_scram", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringVpcConnectivitySaslScram))) + d.Set("bootstrap_brokers_vpc_connectivity_tls", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringVpcConnectivityTls))) + + return diags +} diff --git a/internal/service/kafka/bootstrap_brokers_data_source_test.go b/internal/service/kafka/bootstrap_brokers_data_source_test.go new file mode 100644 index 00000000000..7523b7534e9 --- /dev/null +++ b/internal/service/kafka/bootstrap_brokers_data_source_test.go @@ -0,0 +1,74 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package kafka_test + +import ( + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccKafkaBootstrapBrokersDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_msk_bootstrap_brokers.test" + resourceName := "aws_msk_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccBootstrapBrokersDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers", resourceName, "bootstrap_brokers"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_public_sasl_iam", resourceName, "bootstrap_brokers_public_sasl_iam"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_public_sasl_scram", resourceName, "bootstrap_brokers_public_sasl_scram"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_public_tls", resourceName, "bootstrap_brokers_public_tls"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_sasl_iam", resourceName, "bootstrap_brokers_sasl_iam"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_sasl_scram", resourceName, "bootstrap_brokers_sasl_scram"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_tls", resourceName, "bootstrap_brokers_tls"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_vpc_connectivity_sasl_iam", resourceName, "bootstrap_brokers_vpc_connectivity_sasl_iam"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_vpc_connectivity_sasl_scram", resourceName, "bootstrap_brokers_vpc_connectivity_sasl_scram"), + resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_vpc_connectivity_tls", resourceName, "bootstrap_brokers_vpc_connectivity_tls"), + ), + }, + }, + }) +} + +func testAccBootstrapBrokersDataSourceConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccClusterConfig_base(rName), fmt.Sprintf(` +resource "aws_msk_cluster" "test" { + cluster_name = %[1]q + kafka_version = "2.8.1" + number_of_broker_nodes = 3 + + broker_node_group_info { + client_subnets = aws_subnet.test[*].id + instance_type = "kafka.t3.small" + security_groups = [aws_security_group.test.id] + + storage_info { + ebs_storage_info { + volume_size = 10 + } + } + } + + tags = { + Name = %[1]q + } +} + +data "aws_msk_bootstrap_brokers" "test" { + cluster_arn = aws_msk_cluster.test.arn +} +`, rName)) +} diff --git a/internal/service/kafka/broker_nodes_data_source.go b/internal/service/kafka/broker_nodes_data_source.go index 714ba400727..6bd13319894 100644 --- a/internal/service/kafka/broker_nodes_data_source.go +++ b/internal/service/kafka/broker_nodes_data_source.go @@ -7,8 +7,9 @@ import ( "context" "sort" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" + 
"github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -16,8 +17,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -// @SDKDataSource("aws_msk_broker_nodes") -func DataSourceBrokerNodes() *schema.Resource { +// @SDKDataSource("aws_msk_broker_nodes", name="Broker Nodes") +func dataSourceBrokerNodes() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceBrokerNodesRead, @@ -27,7 +28,6 @@ func DataSourceBrokerNodes() *schema.Resource { Required: true, ValidateFunc: verify.ValidARN, }, - "node_info_list": { Type: schema.TypeList, Computed: true, @@ -67,53 +67,48 @@ func DataSourceBrokerNodes() *schema.Resource { func dataSourceBrokerNodesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) clusterARN := d.Get("cluster_arn").(string) input := &kafka.ListNodesInput{ ClusterArn: aws.String(clusterARN), } - var nodeInfos []*kafka.NodeInfo + var nodeInfos []types.NodeInfo + + pages := kafka.NewListNodesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - err := conn.ListNodesPagesWithContext(ctx, input, func(page *kafka.ListNodesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing MSK Cluster (%s) Broker Nodes: %s", clusterARN, err) } nodeInfos = append(nodeInfos, page.NodeInfoList...) - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing MSK Cluster (%s) Broker Nodes: %s", clusterARN, err) } // node list is returned unsorted sort on broker id sort.Slice(nodeInfos, func(i, j int) bool { - iBrokerId := aws.Float64Value(nodeInfos[i].BrokerNodeInfo.BrokerId) - jBrokerId := aws.Float64Value(nodeInfos[j].BrokerNodeInfo.BrokerId) + iBrokerId := aws.ToFloat64(nodeInfos[i].BrokerNodeInfo.BrokerId) + jBrokerId := aws.ToFloat64(nodeInfos[j].BrokerNodeInfo.BrokerId) return iBrokerId < jBrokerId }) - tfList := make([]interface{}, len(nodeInfos)) - - for i, apiObject := range nodeInfos { - brokerNodeInfo := apiObject.BrokerNodeInfo - tfMap := map[string]interface{}{ - "attached_eni_id": aws.StringValue(brokerNodeInfo.AttachedENIId), - "broker_id": aws.Float64Value(brokerNodeInfo.BrokerId), - "client_subnet": aws.StringValue(brokerNodeInfo.ClientSubnet), - "client_vpc_ip_address": aws.StringValue(brokerNodeInfo.ClientVpcIpAddress), - "endpoints": aws.StringValueSlice(brokerNodeInfo.Endpoints), - "node_arn": aws.StringValue(apiObject.NodeARN), + tfList := []interface{}{} + for _, apiObject := range nodeInfos { + if brokerNodeInfo := apiObject.BrokerNodeInfo; brokerNodeInfo != nil { + tfMap := map[string]interface{}{ + "attached_eni_id": aws.ToString(brokerNodeInfo.AttachedENIId), + "broker_id": aws.ToFloat64(brokerNodeInfo.BrokerId), + "client_subnet": aws.ToString(brokerNodeInfo.ClientSubnet), + "client_vpc_ip_address": aws.ToString(brokerNodeInfo.ClientVpcIpAddress), + "endpoints": brokerNodeInfo.Endpoints, + "node_arn": aws.ToString(apiObject.NodeARN), + } + tfList = append(tfList, tfMap) } - - tfList[i] = tfMap } d.SetId(clusterARN) - if err := d.Set("node_info_list", tfList); err != nil { return sdkdiag.AppendErrorf(diags, "setting node_info_list: %s", err) } diff --git 
a/internal/service/kafka/broker_nodes_data_source_test.go b/internal/service/kafka/broker_nodes_data_source_test.go index 8d371cf5775..88935b5b7d7 100644 --- a/internal/service/kafka/broker_nodes_data_source_test.go +++ b/internal/service/kafka/broker_nodes_data_source_test.go @@ -7,10 +7,10 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/kafka" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccKafkaBrokerNodesDataSource_basic(t *testing.T) { @@ -21,9 +21,8 @@ func TestAccKafkaBrokerNodesDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBrokerNodesDataSourceConfig_basic(rName), diff --git a/internal/service/kafka/cluster.go b/internal/service/kafka/cluster.go index 8a2330c6f68..654cdf8b967 100644 --- a/internal/service/kafka/cluster.go +++ b/internal/service/kafka/cluster.go @@ -10,15 +10,18 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -29,7 +32,7 @@ import ( // @SDKResource("aws_msk_cluster", name="Cluster") // @Tags(identifierAttribute="id") -func ResourceCluster() *schema.Resource { +func resourceCluster() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceClusterCreate, ReadWithoutTimeout: resourceClusterRead, @@ -106,11 +109,11 @@ func ResourceCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "az_distribution": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: kafka.BrokerAZDistributionDefault, - ValidateFunc: validation.StringInSlice(kafka.BrokerAZDistribution_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: types.BrokerAZDistributionDefault, + ValidateDiagFunc: enum.Validate[types.BrokerAZDistribution](), }, "client_subnets": { Type: schema.TypeSet, @@ -180,10 +183,10 @@ func ResourceCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - 
ValidateFunc: validation.StringInSlice(PublicAccessType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[publicAccessType](), }, }, }, @@ -356,10 +359,10 @@ func ResourceCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "client_broker": { - Type: schema.TypeString, - Optional: true, - Default: kafka.ClientBrokerTls, - ValidateFunc: validation.StringInSlice(kafka.ClientBroker_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.ClientBrokerTls, + ValidateDiagFunc: enum.Validate[types.ClientBroker](), }, "in_cluster": { Type: schema.TypeBool, @@ -376,10 +379,10 @@ func ResourceCluster() *schema.Resource { DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, }, "enhanced_monitoring": { - Type: schema.TypeString, - Optional: true, - Default: kafka.EnhancedMonitoringDefault, - ValidateFunc: validation.StringInSlice(kafka.EnhancedMonitoring_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: types.EnhancedMonitoringDefault, + ValidateDiagFunc: enum.Validate[types.EnhancedMonitoring](), }, "kafka_version": { Type: schema.TypeString, @@ -515,10 +518,10 @@ func ResourceCluster() *schema.Resource { }, }, "storage_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(kafka.StorageMode_Values(), true), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[types.StorageMode](), }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), @@ -536,18 +539,17 @@ func ResourceCluster() *schema.Resource { func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) name := d.Get("cluster_name").(string) input := &kafka.CreateClusterInput{ ClusterName: aws.String(name), KafkaVersion: aws.String(d.Get("kafka_version").(string)), - NumberOfBrokerNodes: aws.Int64(int64(d.Get("number_of_broker_nodes").(int))), + NumberOfBrokerNodes: aws.Int32(int32(d.Get("number_of_broker_nodes").(int))), Tags: getTagsIn(ctx), } - var vpcConnectivity *kafka.VpcConnectivity + var vpcConnectivity *types.VpcConnectivity if v, ok := d.GetOk("broker_node_group_info"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.BrokerNodeGroupInfo = expandBrokerNodeGroupInfo(v.([]interface{})[0].(map[string]interface{})) // "BadRequestException: When creating a cluster, all vpcConnectivity auth schemes must be disabled (‘enabled’ : false). 
You can enable auth schemes after the cluster is created" @@ -570,7 +572,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("enhanced_monitoring"); ok { - input.EnhancedMonitoring = aws.String(v.(string)) + input.EnhancedMonitoring = types.EnhancedMonitoring(v.(string)) } if v, ok := d.GetOk("logging_info"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -582,16 +584,16 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("storage_mode"); ok { - input.StorageMode = aws.String(v.(string)) + input.StorageMode = types.StorageMode(v.(string)) } - output, err := conn.CreateClusterWithContext(ctx, input) + output, err := conn.CreateCluster(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK Cluster (%s): %s", name, err) } - d.SetId(aws.StringValue(output.ClusterArn)) + d.SetId(aws.ToString(output.ClusterArn)) cluster, err := waitClusterCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) @@ -602,19 +604,19 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int if vpcConnectivity != nil { input := &kafka.UpdateConnectivityInput{ ClusterArn: aws.String(d.Id()), - ConnectivityInfo: &kafka.ConnectivityInfo{ + ConnectivityInfo: &types.ConnectivityInfo{ VpcConnectivity: vpcConnectivity, }, CurrentVersion: cluster.CurrentVersion, } - output, err := conn.UpdateConnectivityWithContext(ctx, input) + output, err := conn.UpdateConnectivity(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) broker connectivity: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s) complete: %s", d.Id(), clusterOperationARN, err) @@ -626,10 +628,9 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConn(ctx) - - cluster, err := FindClusterByARN(ctx, conn, d.Id()) + cluster, err := findClusterByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] MSK Cluster (%s) not found, removing from state", d.Id()) @@ -641,26 +642,24 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter return sdkdiag.AppendErrorf(diags, "reading MSK Cluster (%s): %s", d.Id(), err) } - output, err := conn.GetBootstrapBrokersWithContext(ctx, &kafka.GetBootstrapBrokersInput{ - ClusterArn: aws.String(d.Id()), - }) + output, err := findBootstrapBrokersByARN(ctx, conn, d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading MSK Cluster (%s) bootstrap brokers: %s", d.Id(), err) } - clusterARN := aws.StringValue(cluster.ClusterArn) + clusterARN := aws.ToString(cluster.ClusterArn) d.Set("arn", clusterARN) - d.Set("bootstrap_brokers", SortEndpointsString(aws.StringValue(output.BootstrapBrokerString))) - d.Set("bootstrap_brokers_public_sasl_iam", SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringPublicSaslIam))) - d.Set("bootstrap_brokers_public_sasl_scram", 
SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringPublicSaslScram))) - d.Set("bootstrap_brokers_public_tls", SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringPublicTls))) - d.Set("bootstrap_brokers_sasl_iam", SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringSaslIam))) - d.Set("bootstrap_brokers_sasl_scram", SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringSaslScram))) - d.Set("bootstrap_brokers_tls", SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringTls))) - d.Set("bootstrap_brokers_vpc_connectivity_sasl_iam", SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringVpcConnectivitySaslIam))) - d.Set("bootstrap_brokers_vpc_connectivity_sasl_scram", SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringVpcConnectivitySaslScram))) - d.Set("bootstrap_brokers_vpc_connectivity_tls", SortEndpointsString(aws.StringValue(output.BootstrapBrokerStringVpcConnectivityTls))) + d.Set("bootstrap_brokers", SortEndpointsString(aws.ToString(output.BootstrapBrokerString))) + d.Set("bootstrap_brokers_public_sasl_iam", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringPublicSaslIam))) + d.Set("bootstrap_brokers_public_sasl_scram", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringPublicSaslScram))) + d.Set("bootstrap_brokers_public_tls", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringPublicTls))) + d.Set("bootstrap_brokers_sasl_iam", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringSaslIam))) + d.Set("bootstrap_brokers_sasl_scram", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringSaslScram))) + d.Set("bootstrap_brokers_tls", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringTls))) + d.Set("bootstrap_brokers_vpc_connectivity_sasl_iam", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringVpcConnectivitySaslIam))) + d.Set("bootstrap_brokers_vpc_connectivity_sasl_scram", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringVpcConnectivitySaslScram))) + d.Set("bootstrap_brokers_vpc_connectivity_tls", SortEndpointsString(aws.ToString(output.BootstrapBrokerStringVpcConnectivityTls))) if cluster.BrokerNodeGroupInfo != nil { if err := d.Set("broker_node_group_info", []interface{}{flattenBrokerNodeGroupInfo(cluster.BrokerNodeGroupInfo)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting broker_node_group_info: %s", err) @@ -711,8 +710,8 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("open_monitoring", nil) } d.Set("storage_mode", cluster.StorageMode) - d.Set("zookeeper_connect_string", SortEndpointsString(aws.StringValue(cluster.ZookeeperConnectString))) - d.Set("zookeeper_connect_string_tls", SortEndpointsString(aws.StringValue(cluster.ZookeeperConnectStringTls))) + d.Set("zookeeper_connect_string", SortEndpointsString(aws.ToString(cluster.ZookeeperConnectString))) + d.Set("zookeeper_connect_string_tls", SortEndpointsString(aws.ToString(cluster.ZookeeperConnectStringTls))) setTagsOut(ctx, cluster.Tags) @@ -721,8 +720,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) if d.HasChange("broker_node_group_info.0.connectivity_info") { input := &kafka.UpdateConnectivityInput{ @@ -734,13 +732,13 @@ func resourceClusterUpdate(ctx 
context.Context, d *schema.ResourceData, meta int input.ConnectivityInfo = expandConnectivityInfo(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.UpdateConnectivityWithContext(ctx, input) + output, err := conn.UpdateConnectivity(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) broker connectivity: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s) complete: %s", d.Id(), clusterOperationARN, err) @@ -759,13 +757,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int TargetInstanceType: aws.String(d.Get("broker_node_group_info.0.instance_type").(string)), } - output, err := conn.UpdateBrokerTypeWithContext(ctx, input) + output, err := conn.UpdateBrokerType(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) broker type: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s) complete: %s", d.Id(), clusterOperationARN, err) @@ -781,9 +779,9 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int input := &kafka.UpdateBrokerStorageInput{ ClusterArn: aws.String(d.Id()), CurrentVersion: aws.String(d.Get("current_version").(string)), - TargetBrokerEBSVolumeInfo: []*kafka.BrokerEBSVolumeInfo{{ + TargetBrokerEBSVolumeInfo: []types.BrokerEBSVolumeInfo{{ KafkaBrokerNodeId: aws.String("All"), - VolumeSizeGB: aws.Int64(int64(d.Get("broker_node_group_info.0.storage_info.0.ebs_storage_info.0.volume_size").(int))), + VolumeSizeGB: aws.Int32(int32(d.Get("broker_node_group_info.0.storage_info.0.ebs_storage_info.0.volume_size").(int))), }}, } @@ -791,13 +789,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int input.TargetBrokerEBSVolumeInfo[0].ProvisionedThroughput = expandProvisionedThroughput(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.UpdateBrokerStorageWithContext(ctx, input) + output, err := conn.UpdateBrokerStorage(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) broker storage: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s) complete: %s", d.Id(), clusterOperationARN, err) @@ -813,16 +811,16 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int input := &kafka.UpdateBrokerCountInput{ ClusterArn: aws.String(d.Id()), CurrentVersion: aws.String(d.Get("current_version").(string)), - TargetNumberOfBrokerNodes: aws.Int64(int64(d.Get("number_of_broker_nodes").(int))), + TargetNumberOfBrokerNodes: aws.Int32(int32(d.Get("number_of_broker_nodes").(int))), } - output, err := conn.UpdateBrokerCountWithContext(ctx, input) + 
output, err := conn.UpdateBrokerCount(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) broker count: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s) complete: %s", d.Id(), clusterOperationARN, err) @@ -838,7 +836,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int input := &kafka.UpdateMonitoringInput{ ClusterArn: aws.String(d.Id()), CurrentVersion: aws.String(d.Get("current_version").(string)), - EnhancedMonitoring: aws.String(d.Get("enhanced_monitoring").(string)), + EnhancedMonitoring: types.EnhancedMonitoring(d.Get("enhanced_monitoring").(string)), } if v, ok := d.GetOk("logging_info"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -849,13 +847,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int input.OpenMonitoring = expandOpenMonitoringInfo(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.UpdateMonitoringWithContext(ctx, input) + output, err := conn.UpdateMonitoring(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) monitoring: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s) complete: %s", d.Id(), clusterOperationARN, err) @@ -877,13 +875,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int input.ConfigurationInfo = expandConfigurationInfo(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.UpdateClusterConfigurationWithContext(ctx, input) + output, err := conn.UpdateClusterConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) configuration: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s) complete: %s", d.Id(), clusterOperationARN, err) @@ -908,13 +906,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } } - output, err := conn.UpdateClusterKafkaVersionWithContext(ctx, input) + output, err := conn.UpdateClusterKafkaVersion(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) Kafka version: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s): %s", d.Id(), clusterOperationARN, err) @@ -950,13 +948,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } } - output, err := 
conn.UpdateSecurityWithContext(ctx, input) + output, err := conn.UpdateSecurity(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Cluster (%s) security: %s", d.Id(), err) } - clusterOperationARN := aws.StringValue(output.ClusterOperationArn) + clusterOperationARN := aws.ToString(output.ClusterOperationArn) if _, err := waitClusterOperationCompleted(ctx, conn, clusterOperationARN, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Cluster (%s) operation (%s): %s", d.Id(), clusterOperationARN, err) @@ -973,15 +971,14 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) log.Printf("[DEBUG] Deleting MSK Cluster: %s", d.Id()) - _, err := conn.DeleteClusterWithContext(ctx, &kafka.DeleteClusterInput{ + _, err := conn.DeleteCluster(ctx, &kafka.DeleteClusterInput{ ClusterArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { + if errs.IsA[*types.NotFoundException](err) { return diags } @@ -996,19 +993,242 @@ func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta int return diags } -func expandBrokerNodeGroupInfo(tfMap map[string]interface{}) *kafka.BrokerNodeGroupInfo { +func refreshClusterVersion(ctx context.Context, d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).KafkaClient(ctx) + + cluster, err := findClusterByARN(ctx, conn, d.Id()) + + if err != nil { + return fmt.Errorf("reading MSK Cluster (%s): %w", d.Id(), err) + } + + d.Set("current_version", cluster.CurrentVersion) + + return nil +} + +func findClusterByARN(ctx context.Context, conn *kafka.Client, arn string) (*types.ClusterInfo, error) { + input := &kafka.DescribeClusterInput{ + ClusterArn: aws.String(arn), + } + + output, err := conn.DescribeCluster(ctx, input) + + if errs.IsA[*types.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.ClusterInfo == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ClusterInfo, nil +} + +func findClusterV2ByARN(ctx context.Context, conn *kafka.Client, arn string) (*types.Cluster, error) { + input := &kafka.DescribeClusterV2Input{ + ClusterArn: aws.String(arn), + } + + output, err := conn.DescribeClusterV2(ctx, input) + + if errs.IsA[*types.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.ClusterInfo == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ClusterInfo, nil +} + +func findClusterOperationByARN(ctx context.Context, conn *kafka.Client, arn string) (*types.ClusterOperationInfo, error) { + input := &kafka.DescribeClusterOperationInput{ + ClusterOperationArn: aws.String(arn), + } + + output, err := conn.DescribeClusterOperation(ctx, input) + + if errs.IsA[*types.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.ClusterOperationInfo == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return 
output.ClusterOperationInfo, nil +} + +func findBootstrapBrokersByARN(ctx context.Context, conn *kafka.Client, arn string) (*kafka.GetBootstrapBrokersOutput, error) { + input := &kafka.GetBootstrapBrokersInput{ + ClusterArn: aws.String(arn), + } + + output, err := conn.GetBootstrapBrokers(ctx, input) + + if errs.IsA[*types.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusClusterState(ctx context.Context, conn *kafka.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findClusterV2ByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil + } +} + +func statusClusterOperationState(ctx context.Context, conn *kafka.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findClusterOperationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.ToString(output.OperationState), nil + } +} + +func waitClusterCreated(ctx context.Context, conn *kafka.Client, arn string, timeout time.Duration) (*types.Cluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.ClusterStateCreating), + Target: enum.Slice(types.ClusterStateActive), + Refresh: statusClusterState(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Cluster); ok { + if state, stateInfo := output.State, output.StateInfo; state == types.ClusterStateFailed && stateInfo != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateInfo.Code), aws.ToString(stateInfo.Message))) + } + + return output, err + } + + return nil, err +} + +func waitClusterDeleted(ctx context.Context, conn *kafka.Client, arn string, timeout time.Duration) (*types.Cluster, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.ClusterStateDeleting), + Target: []string{}, + Refresh: statusClusterState(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.Cluster); ok { + if state, stateInfo := output.State, output.StateInfo; state == types.ClusterStateFailed && stateInfo != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateInfo.Code), aws.ToString(stateInfo.Message))) + } + + return output, err + } + + return nil, err +} + +func waitClusterOperationCompleted(ctx context.Context, conn *kafka.Client, arn string, timeout time.Duration) (*types.ClusterOperationInfo, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: []string{clusterOperationStatePending, clusterOperationStateUpdateInProgress}, + Target: []string{clusterOperationStateUpdateComplete}, + Refresh: statusClusterOperationState(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.ClusterOperationInfo); ok { + if state, errorInfo := aws.ToString(output.OperationState), output.ErrorInfo; state == clusterOperationStateUpdateFailed && errorInfo != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", 
aws.ToString(errorInfo.ErrorCode), aws.ToString(errorInfo.ErrorString))) + } + + return output, err + } + + return nil, err +} + +func clusterUUIDFromARN(clusterARN string) (string, error) { + parsedARN, err := arn.Parse(clusterARN) + if err != nil { + return "", err + } + + // arn:${Partition}:kafka:${Region}:${Account}:cluster/${ClusterName}/${Uuid} + parts := strings.Split(parsedARN.Resource, "/") + if len(parts) != 3 || parts[0] != "cluster" || parts[1] == "" || parts[2] == "" { + return "", fmt.Errorf("invalid MSK Cluster ARN (%s)", clusterARN) + } + return parts[2], nil +} + +func expandBrokerNodeGroupInfo(tfMap map[string]interface{}) *types.BrokerNodeGroupInfo { if tfMap == nil { return nil } - apiObject := &kafka.BrokerNodeGroupInfo{} + apiObject := &types.BrokerNodeGroupInfo{} if v, ok := tfMap["az_distribution"].(string); ok && v != "" { - apiObject.BrokerAZDistribution = aws.String(v) + apiObject.BrokerAZDistribution = types.BrokerAZDistribution(v) } if v, ok := tfMap["client_subnets"].(*schema.Set); ok && v.Len() > 0 { - apiObject.ClientSubnets = flex.ExpandStringSet(v) + apiObject.ClientSubnets = flex.ExpandStringValueSet(v) } if v, ok := tfMap["connectivity_info"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -1020,7 +1240,7 @@ func expandBrokerNodeGroupInfo(tfMap map[string]interface{}) *kafka.BrokerNodeGr } if v, ok := tfMap["security_groups"].(*schema.Set); ok && v.Len() > 0 { - apiObject.SecurityGroups = flex.ExpandStringSet(v) + apiObject.SecurityGroups = flex.ExpandStringValueSet(v) } if v, ok := tfMap["storage_info"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -1030,12 +1250,12 @@ func expandBrokerNodeGroupInfo(tfMap map[string]interface{}) *kafka.BrokerNodeGr return apiObject } -func expandConnectivityInfo(tfMap map[string]interface{}) *kafka.ConnectivityInfo { +func expandConnectivityInfo(tfMap map[string]interface{}) *types.ConnectivityInfo { if tfMap == nil { return nil } - apiObject := &kafka.ConnectivityInfo{} + apiObject := &types.ConnectivityInfo{} if v, ok := tfMap["public_access"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.PublicAccess = expandPublicAccess(v[0].(map[string]interface{})) @@ -1048,12 +1268,12 @@ func expandConnectivityInfo(tfMap map[string]interface{}) *kafka.ConnectivityInf return apiObject } -func expandStorageInfo(tfMap map[string]interface{}) *kafka.StorageInfo { +func expandStorageInfo(tfMap map[string]interface{}) *types.StorageInfo { if tfMap == nil { return nil } - apiObject := &kafka.StorageInfo{} + apiObject := &types.StorageInfo{} if v, ok := tfMap["ebs_storage_info"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.EbsStorageInfo = expandEBSStorageInfo(v[0].(map[string]interface{})) @@ -1062,48 +1282,48 @@ func expandStorageInfo(tfMap map[string]interface{}) *kafka.StorageInfo { return apiObject } -func expandEBSStorageInfo(tfMap map[string]interface{}) *kafka.EBSStorageInfo { +func expandEBSStorageInfo(tfMap map[string]interface{}) *types.EBSStorageInfo { if tfMap == nil { return nil } - apiObject := &kafka.EBSStorageInfo{} + apiObject := &types.EBSStorageInfo{} if v, ok := tfMap["provisioned_throughput"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ProvisionedThroughput = expandProvisionedThroughput(v[0].(map[string]interface{})) } if v, ok := tfMap["volume_size"].(int); ok && v != 0 { - apiObject.VolumeSize = aws.Int64(int64(v)) + apiObject.VolumeSize = aws.Int32(int32(v)) } return apiObject } -func expandProvisionedThroughput(tfMap map[string]interface{}) 
*kafka.ProvisionedThroughput { +func expandProvisionedThroughput(tfMap map[string]interface{}) *types.ProvisionedThroughput { if tfMap == nil { return nil } - apiObject := &kafka.ProvisionedThroughput{} + apiObject := &types.ProvisionedThroughput{} if v, ok := tfMap["enabled"].(bool); ok { apiObject.Enabled = aws.Bool(v) } if v, ok := tfMap["volume_throughput"].(int); ok && v != 0 { - apiObject.VolumeThroughput = aws.Int64(int64(v)) + apiObject.VolumeThroughput = aws.Int32(int32(v)) } return apiObject } -func expandPublicAccess(tfMap map[string]interface{}) *kafka.PublicAccess { +func expandPublicAccess(tfMap map[string]interface{}) *types.PublicAccess { if tfMap == nil { return nil } - apiObject := &kafka.PublicAccess{} + apiObject := &types.PublicAccess{} if v, ok := tfMap["type"].(string); ok && v != "" { apiObject.Type = aws.String(v) @@ -1112,12 +1332,12 @@ func expandPublicAccess(tfMap map[string]interface{}) *kafka.PublicAccess { return apiObject } -func expandVPCConnectivity(tfMap map[string]interface{}) *kafka.VpcConnectivity { +func expandVPCConnectivity(tfMap map[string]interface{}) *types.VpcConnectivity { if tfMap == nil { return nil } - apiObject := &kafka.VpcConnectivity{} + apiObject := &types.VpcConnectivity{} if v, ok := tfMap["client_authentication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ClientAuthentication = expandVPCConnectivityClientAuthentication(v[0].(map[string]interface{})) @@ -1126,19 +1346,19 @@ func expandVPCConnectivity(tfMap map[string]interface{}) *kafka.VpcConnectivity return apiObject } -func expandVPCConnectivityClientAuthentication(tfMap map[string]interface{}) *kafka.VpcConnectivityClientAuthentication { +func expandVPCConnectivityClientAuthentication(tfMap map[string]interface{}) *types.VpcConnectivityClientAuthentication { if tfMap == nil { return nil } - apiObject := &kafka.VpcConnectivityClientAuthentication{} + apiObject := &types.VpcConnectivityClientAuthentication{} if v, ok := tfMap["sasl"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Sasl = expandVPCConnectivitySASL(v[0].(map[string]interface{})) } if v, ok := tfMap["tls"].(bool); ok { - apiObject.Tls = &kafka.VpcConnectivityTls{ + apiObject.Tls = &types.VpcConnectivityTls{ Enabled: aws.Bool(v), } } @@ -1146,21 +1366,21 @@ func expandVPCConnectivityClientAuthentication(tfMap map[string]interface{}) *ka return apiObject } -func expandVPCConnectivitySASL(tfMap map[string]interface{}) *kafka.VpcConnectivitySasl { +func expandVPCConnectivitySASL(tfMap map[string]interface{}) *types.VpcConnectivitySasl { if tfMap == nil { return nil } - apiObject := &kafka.VpcConnectivitySasl{} + apiObject := &types.VpcConnectivitySasl{} if v, ok := tfMap["iam"].(bool); ok { - apiObject.Iam = &kafka.VpcConnectivityIam{ + apiObject.Iam = &types.VpcConnectivityIam{ Enabled: aws.Bool(v), } } if v, ok := tfMap["scram"].(bool); ok { - apiObject.Scram = &kafka.VpcConnectivityScram{ + apiObject.Scram = &types.VpcConnectivityScram{ Enabled: aws.Bool(v), } } @@ -1168,12 +1388,12 @@ func expandVPCConnectivitySASL(tfMap map[string]interface{}) *kafka.VpcConnectiv return apiObject } -func expandClientAuthentication(tfMap map[string]interface{}) *kafka.ClientAuthentication { +func expandClientAuthentication(tfMap map[string]interface{}) *types.ClientAuthentication { if tfMap == nil { return nil } - apiObject := &kafka.ClientAuthentication{} + apiObject := &types.ClientAuthentication{} if v, ok := tfMap["sasl"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Sasl = 
expandSASL(v[0].(map[string]interface{})) @@ -1184,7 +1404,7 @@ func expandClientAuthentication(tfMap map[string]interface{}) *kafka.ClientAuthe } if v, ok := tfMap["unauthenticated"].(bool); ok { - apiObject.Unauthenticated = &kafka.Unauthenticated{ + apiObject.Unauthenticated = &types.Unauthenticated{ Enabled: aws.Bool(v), } } @@ -1192,21 +1412,21 @@ func expandClientAuthentication(tfMap map[string]interface{}) *kafka.ClientAuthe return apiObject } -func expandSASL(tfMap map[string]interface{}) *kafka.Sasl { +func expandSASL(tfMap map[string]interface{}) *types.Sasl { if tfMap == nil { return nil } - apiObject := &kafka.Sasl{} + apiObject := &types.Sasl{} if v, ok := tfMap["iam"].(bool); ok { - apiObject.Iam = &kafka.Iam{ + apiObject.Iam = &types.Iam{ Enabled: aws.Bool(v), } } if v, ok := tfMap["scram"].(bool); ok { - apiObject.Scram = &kafka.Scram{ + apiObject.Scram = &types.Scram{ Enabled: aws.Bool(v), } } @@ -1214,15 +1434,15 @@ func expandSASL(tfMap map[string]interface{}) *kafka.Sasl { return apiObject } -func expandTLS(tfMap map[string]interface{}) *kafka.Tls { +func expandTLS(tfMap map[string]interface{}) *types.Tls { if tfMap == nil { return nil } - apiObject := &kafka.Tls{} + apiObject := &types.Tls{} if v, ok := tfMap["certificate_authority_arns"].(*schema.Set); ok && v.Len() > 0 { - apiObject.CertificateAuthorityArnList = flex.ExpandStringSet(v) + apiObject.CertificateAuthorityArnList = flex.ExpandStringValueSet(v) apiObject.Enabled = aws.Bool(true) } else { apiObject.Enabled = aws.Bool(false) @@ -1231,12 +1451,12 @@ func expandTLS(tfMap map[string]interface{}) *kafka.Tls { return apiObject } -func expandConfigurationInfo(tfMap map[string]interface{}) *kafka.ConfigurationInfo { +func expandConfigurationInfo(tfMap map[string]interface{}) *types.ConfigurationInfo { if tfMap == nil { return nil } - apiObject := &kafka.ConfigurationInfo{} + apiObject := &types.ConfigurationInfo{} if v, ok := tfMap["arn"].(string); ok && v != "" { apiObject.Arn = aws.String(v) @@ -1249,19 +1469,19 @@ func expandConfigurationInfo(tfMap map[string]interface{}) *kafka.ConfigurationI return apiObject } -func expandEncryptionInfo(tfMap map[string]interface{}) *kafka.EncryptionInfo { +func expandEncryptionInfo(tfMap map[string]interface{}) *types.EncryptionInfo { if tfMap == nil { return nil } - apiObject := &kafka.EncryptionInfo{} + apiObject := &types.EncryptionInfo{} if v, ok := tfMap["encryption_in_transit"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.EncryptionInTransit = expandEncryptionInTransit(v[0].(map[string]interface{})) } if v, ok := tfMap["encryption_at_rest_kms_key_arn"].(string); ok && v != "" { - apiObject.EncryptionAtRest = &kafka.EncryptionAtRest{ + apiObject.EncryptionAtRest = &types.EncryptionAtRest{ DataVolumeKMSKeyId: aws.String(v), } } @@ -1269,15 +1489,15 @@ func expandEncryptionInfo(tfMap map[string]interface{}) *kafka.EncryptionInfo { return apiObject } -func expandEncryptionInTransit(tfMap map[string]interface{}) *kafka.EncryptionInTransit { +func expandEncryptionInTransit(tfMap map[string]interface{}) *types.EncryptionInTransit { if tfMap == nil { return nil } - apiObject := &kafka.EncryptionInTransit{} + apiObject := &types.EncryptionInTransit{} if v, ok := tfMap["client_broker"].(string); ok && v != "" { - apiObject.ClientBroker = aws.String(v) + apiObject.ClientBroker = types.ClientBroker(v) } if v, ok := tfMap["in_cluster"].(bool); ok { @@ -1287,12 +1507,12 @@ func expandEncryptionInTransit(tfMap map[string]interface{}) *kafka.EncryptionIn return 
apiObject } -func expandLoggingInfo(tfMap map[string]interface{}) *kafka.LoggingInfo { +func expandLoggingInfo(tfMap map[string]interface{}) *types.LoggingInfo { if tfMap == nil { return nil } - apiObject := &kafka.LoggingInfo{} + apiObject := &types.LoggingInfo{} if v, ok := tfMap["broker_logs"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.BrokerLogs = expandBrokerLogs(v[0].(map[string]interface{})) @@ -1301,12 +1521,12 @@ func expandLoggingInfo(tfMap map[string]interface{}) *kafka.LoggingInfo { return apiObject } -func expandBrokerLogs(tfMap map[string]interface{}) *kafka.BrokerLogs { +func expandBrokerLogs(tfMap map[string]interface{}) *types.BrokerLogs { if tfMap == nil { return nil } - apiObject := &kafka.BrokerLogs{} + apiObject := &types.BrokerLogs{} if v, ok := tfMap["cloudwatch_logs"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.CloudWatchLogs = expandCloudWatchLogs(v[0].(map[string]interface{})) @@ -1323,12 +1543,12 @@ func expandBrokerLogs(tfMap map[string]interface{}) *kafka.BrokerLogs { return apiObject } -func expandCloudWatchLogs(tfMap map[string]interface{}) *kafka.CloudWatchLogs { +func expandCloudWatchLogs(tfMap map[string]interface{}) *types.CloudWatchLogs { if tfMap == nil { return nil } - apiObject := &kafka.CloudWatchLogs{} + apiObject := &types.CloudWatchLogs{} if v, ok := tfMap["enabled"].(bool); ok { apiObject.Enabled = aws.Bool(v) @@ -1341,12 +1561,12 @@ func expandCloudWatchLogs(tfMap map[string]interface{}) *kafka.CloudWatchLogs { return apiObject } -func expandFirehose(tfMap map[string]interface{}) *kafka.Firehose { +func expandFirehose(tfMap map[string]interface{}) *types.Firehose { if tfMap == nil { return nil } - apiObject := &kafka.Firehose{} + apiObject := &types.Firehose{} if v, ok := tfMap["delivery_stream"].(string); ok && v != "" { apiObject.DeliveryStream = aws.String(v) @@ -1359,12 +1579,12 @@ func expandFirehose(tfMap map[string]interface{}) *kafka.Firehose { return apiObject } -func expandS3(tfMap map[string]interface{}) *kafka.S3 { +func expandS3(tfMap map[string]interface{}) *types.S3 { if tfMap == nil { return nil } - apiObject := &kafka.S3{} + apiObject := &types.S3{} if v, ok := tfMap["bucket"].(string); ok && v != "" { apiObject.Bucket = aws.String(v) @@ -1381,12 +1601,12 @@ func expandS3(tfMap map[string]interface{}) *kafka.S3 { return apiObject } -func expandOpenMonitoringInfo(tfMap map[string]interface{}) *kafka.OpenMonitoringInfo { +func expandOpenMonitoringInfo(tfMap map[string]interface{}) *types.OpenMonitoringInfo { if tfMap == nil { return nil } - apiObject := &kafka.OpenMonitoringInfo{} + apiObject := &types.OpenMonitoringInfo{} if v, ok := tfMap["prometheus"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Prometheus = expandPrometheusInfo(v[0].(map[string]interface{})) @@ -1395,12 +1615,12 @@ func expandOpenMonitoringInfo(tfMap map[string]interface{}) *kafka.OpenMonitorin return apiObject } -func expandPrometheusInfo(tfMap map[string]interface{}) *kafka.PrometheusInfo { +func expandPrometheusInfo(tfMap map[string]interface{}) *types.PrometheusInfo { if tfMap == nil { return nil } - apiObject := &kafka.PrometheusInfo{} + apiObject := &types.PrometheusInfo{} if v, ok := tfMap["jmx_exporter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.JmxExporter = expandJmxExporterInfo(v[0].(map[string]interface{})) @@ -1413,12 +1633,12 @@ func expandPrometheusInfo(tfMap map[string]interface{}) *kafka.PrometheusInfo { return apiObject } -func expandJmxExporterInfo(tfMap 
map[string]interface{}) *kafka.JmxExporterInfo { +func expandJmxExporterInfo(tfMap map[string]interface{}) *types.JmxExporterInfo { if tfMap == nil { return nil } - apiObject := &kafka.JmxExporterInfo{} + apiObject := &types.JmxExporterInfo{} if v, ok := tfMap["enabled_in_broker"].(bool); ok { apiObject.EnabledInBroker = aws.Bool(v) @@ -1427,12 +1647,12 @@ func expandJmxExporterInfo(tfMap map[string]interface{}) *kafka.JmxExporterInfo return apiObject } -func expandNodeExporterInfo(tfMap map[string]interface{}) *kafka.NodeExporterInfo { +func expandNodeExporterInfo(tfMap map[string]interface{}) *types.NodeExporterInfo { if tfMap == nil { return nil } - apiObject := &kafka.NodeExporterInfo{} + apiObject := &types.NodeExporterInfo{} if v, ok := tfMap["enabled_in_broker"].(bool); ok { apiObject.EnabledInBroker = aws.Bool(v) @@ -1441,19 +1661,17 @@ func expandNodeExporterInfo(tfMap map[string]interface{}) *kafka.NodeExporterInf return apiObject } -func flattenBrokerNodeGroupInfo(apiObject *kafka.BrokerNodeGroupInfo) map[string]interface{} { +func flattenBrokerNodeGroupInfo(apiObject *types.BrokerNodeGroupInfo) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.BrokerAZDistribution; v != nil { - tfMap["az_distribution"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "az_distribution": apiObject.BrokerAZDistribution, } if v := apiObject.ClientSubnets; v != nil { - tfMap["client_subnets"] = aws.StringValueSlice(v) + tfMap["client_subnets"] = v } if v := apiObject.ConnectivityInfo; v != nil { @@ -1461,11 +1679,11 @@ func flattenBrokerNodeGroupInfo(apiObject *kafka.BrokerNodeGroupInfo) map[string } if v := apiObject.InstanceType; v != nil { - tfMap["instance_type"] = aws.StringValue(v) + tfMap["instance_type"] = aws.ToString(v) } if v := apiObject.SecurityGroups; v != nil { - tfMap["security_groups"] = aws.StringValueSlice(v) + tfMap["security_groups"] = v } if v := apiObject.StorageInfo; v != nil { @@ -1475,7 +1693,7 @@ func flattenBrokerNodeGroupInfo(apiObject *kafka.BrokerNodeGroupInfo) map[string return tfMap } -func flattenConnectivityInfo(apiObject *kafka.ConnectivityInfo) map[string]interface{} { +func flattenConnectivityInfo(apiObject *types.ConnectivityInfo) map[string]interface{} { if apiObject == nil { return nil } @@ -1493,7 +1711,7 @@ func flattenConnectivityInfo(apiObject *kafka.ConnectivityInfo) map[string]inter return tfMap } -func flattenStorageInfo(apiObject *kafka.StorageInfo) []interface{} { +func flattenStorageInfo(apiObject *types.StorageInfo) []interface{} { if apiObject == nil { return nil } @@ -1507,7 +1725,7 @@ func flattenStorageInfo(apiObject *kafka.StorageInfo) []interface{} { return []interface{}{tfMap} } -func flattenEBSStorageInfo(apiObject *kafka.EBSStorageInfo) []interface{} { +func flattenEBSStorageInfo(apiObject *types.EBSStorageInfo) []interface{} { if apiObject == nil { return nil } @@ -1519,13 +1737,13 @@ func flattenEBSStorageInfo(apiObject *kafka.EBSStorageInfo) []interface{} { } if v := apiObject.VolumeSize; v != nil { - tfMap["volume_size"] = aws.Int64Value(v) + tfMap["volume_size"] = aws.ToInt32(v) } return []interface{}{tfMap} } -func flattenProvisionedThroughput(apiObject *kafka.ProvisionedThroughput) []interface{} { +func flattenProvisionedThroughput(apiObject *types.ProvisionedThroughput) []interface{} { if apiObject == nil { return nil } @@ -1533,17 +1751,17 @@ func flattenProvisionedThroughput(apiObject *kafka.ProvisionedThroughput) []inte tfMap := map[string]interface{}{} 
if v := apiObject.Enabled; v != nil { - tfMap["enabled"] = aws.BoolValue(v) + tfMap["enabled"] = aws.ToBool(v) } if v := apiObject.VolumeThroughput; v != nil { - tfMap["volume_throughput"] = aws.Int64Value(v) + tfMap["volume_throughput"] = aws.ToInt32(v) } return []interface{}{tfMap} } -func flattenPublicAccess(apiObject *kafka.PublicAccess) map[string]interface{} { +func flattenPublicAccess(apiObject *types.PublicAccess) map[string]interface{} { if apiObject == nil { return nil } @@ -1551,13 +1769,13 @@ func flattenPublicAccess(apiObject *kafka.PublicAccess) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Type; v != nil { - tfMap["type"] = aws.StringValue(v) + tfMap["type"] = aws.ToString(v) } return tfMap } -func flattenVPCConnectivity(apiObject *kafka.VpcConnectivity) map[string]interface{} { +func flattenVPCConnectivity(apiObject *types.VpcConnectivity) map[string]interface{} { if apiObject == nil { return nil } @@ -1570,7 +1788,7 @@ func flattenVPCConnectivity(apiObject *kafka.VpcConnectivity) map[string]interfa return tfMap } -func flattenVPCConnectivityClientAuthentication(apiObject *kafka.VpcConnectivityClientAuthentication) map[string]interface{} { +func flattenVPCConnectivityClientAuthentication(apiObject *types.VpcConnectivityClientAuthentication) map[string]interface{} { if apiObject == nil { return nil } @@ -1583,14 +1801,14 @@ func flattenVPCConnectivityClientAuthentication(apiObject *kafka.VpcConnectivity if v := apiObject.Tls; v != nil { if v := v.Enabled; v != nil { - tfMap["tls"] = aws.BoolValue(v) + tfMap["tls"] = aws.ToBool(v) } } return tfMap } -func flattenVPCConnectivitySASL(apiObject *kafka.VpcConnectivitySasl) map[string]interface{} { +func flattenVPCConnectivitySASL(apiObject *types.VpcConnectivitySasl) map[string]interface{} { if apiObject == nil { return nil } @@ -1599,20 +1817,20 @@ func flattenVPCConnectivitySASL(apiObject *kafka.VpcConnectivitySasl) map[string if v := apiObject.Iam; v != nil { if v := v.Enabled; v != nil { - tfMap["iam"] = aws.BoolValue(v) + tfMap["iam"] = aws.ToBool(v) } } if v := apiObject.Scram; v != nil { if v := v.Enabled; v != nil { - tfMap["scram"] = aws.BoolValue(v) + tfMap["scram"] = aws.ToBool(v) } } return tfMap } -func flattenClientAuthentication(apiObject *kafka.ClientAuthentication) map[string]interface{} { +func flattenClientAuthentication(apiObject *types.ClientAuthentication) map[string]interface{} { if apiObject == nil { return nil } @@ -1629,14 +1847,14 @@ func flattenClientAuthentication(apiObject *kafka.ClientAuthentication) map[stri if v := apiObject.Unauthenticated; v != nil { if v := v.Enabled; v != nil { - tfMap["unauthenticated"] = aws.BoolValue(v) + tfMap["unauthenticated"] = aws.ToBool(v) } } return tfMap } -func flattenSASL(apiObject *kafka.Sasl) map[string]interface{} { +func flattenSASL(apiObject *types.Sasl) map[string]interface{} { if apiObject == nil { return nil } @@ -1645,34 +1863,34 @@ func flattenSASL(apiObject *kafka.Sasl) map[string]interface{} { if v := apiObject.Iam; v != nil { if v := v.Enabled; v != nil { - tfMap["iam"] = aws.BoolValue(v) + tfMap["iam"] = aws.ToBool(v) } } if v := apiObject.Scram; v != nil { if v := v.Enabled; v != nil { - tfMap["scram"] = aws.BoolValue(v) + tfMap["scram"] = aws.ToBool(v) } } return tfMap } -func flattenTLS(apiObject *kafka.Tls) map[string]interface{} { +func flattenTLS(apiObject *types.Tls) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.CertificateAuthorityArnList; v != 
nil && aws.BoolValue(apiObject.Enabled) { - tfMap["certificate_authority_arns"] = aws.StringValueSlice(v) + if v := apiObject.CertificateAuthorityArnList; v != nil && aws.ToBool(apiObject.Enabled) { + tfMap["certificate_authority_arns"] = v } return tfMap } -func flattenBrokerSoftwareInfo(apiObject *kafka.BrokerSoftwareInfo) map[string]interface{} { +func flattenBrokerSoftwareInfo(apiObject *types.BrokerSoftwareInfo) map[string]interface{} { if apiObject == nil { return nil } @@ -1680,17 +1898,17 @@ func flattenBrokerSoftwareInfo(apiObject *kafka.BrokerSoftwareInfo) map[string]i tfMap := map[string]interface{}{} if v := apiObject.ConfigurationArn; v != nil { - tfMap["arn"] = aws.StringValue(v) + tfMap["arn"] = aws.ToString(v) } if v := apiObject.ConfigurationRevision; v != nil { - tfMap["revision"] = aws.Int64Value(v) + tfMap["revision"] = aws.ToInt64(v) } return tfMap } -func flattenEncryptionInfo(apiObject *kafka.EncryptionInfo) map[string]interface{} { +func flattenEncryptionInfo(apiObject *types.EncryptionInfo) map[string]interface{} { if apiObject == nil { return nil } @@ -1699,7 +1917,7 @@ func flattenEncryptionInfo(apiObject *kafka.EncryptionInfo) map[string]interface if v := apiObject.EncryptionAtRest; v != nil { if v := v.DataVolumeKMSKeyId; v != nil { - tfMap["encryption_at_rest_kms_key_arn"] = aws.StringValue(v) + tfMap["encryption_at_rest_kms_key_arn"] = aws.ToString(v) } } @@ -1710,25 +1928,23 @@ func flattenEncryptionInfo(apiObject *kafka.EncryptionInfo) map[string]interface return tfMap } -func flattenEncryptionInTransit(apiObject *kafka.EncryptionInTransit) map[string]interface{} { +func flattenEncryptionInTransit(apiObject *types.EncryptionInTransit) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.ClientBroker; v != nil { - tfMap["client_broker"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "client_broker": apiObject.ClientBroker, } if v := apiObject.InCluster; v != nil { - tfMap["in_cluster"] = aws.BoolValue(v) + tfMap["in_cluster"] = aws.ToBool(v) } return tfMap } -func flattenLoggingInfo(apiObject *kafka.LoggingInfo) map[string]interface{} { +func flattenLoggingInfo(apiObject *types.LoggingInfo) map[string]interface{} { if apiObject == nil { return nil } @@ -1742,7 +1958,7 @@ func flattenLoggingInfo(apiObject *kafka.LoggingInfo) map[string]interface{} { return tfMap } -func flattenBrokerLogs(apiObject *kafka.BrokerLogs) map[string]interface{} { +func flattenBrokerLogs(apiObject *types.BrokerLogs) map[string]interface{} { if apiObject == nil { return nil } @@ -1764,7 +1980,7 @@ func flattenBrokerLogs(apiObject *kafka.BrokerLogs) map[string]interface{} { return tfMap } -func flattenCloudWatchLogs(apiObject *kafka.CloudWatchLogs) map[string]interface{} { +func flattenCloudWatchLogs(apiObject *types.CloudWatchLogs) map[string]interface{} { if apiObject == nil { return nil } @@ -1772,17 +1988,17 @@ func flattenCloudWatchLogs(apiObject *kafka.CloudWatchLogs) map[string]interface tfMap := map[string]interface{}{} if v := apiObject.Enabled; v != nil { - tfMap["enabled"] = aws.BoolValue(v) + tfMap["enabled"] = aws.ToBool(v) } if v := apiObject.LogGroup; v != nil { - tfMap["log_group"] = aws.StringValue(v) + tfMap["log_group"] = aws.ToString(v) } return tfMap } -func flattenFirehose(apiObject *kafka.Firehose) map[string]interface{} { +func flattenFirehose(apiObject *types.Firehose) map[string]interface{} { if apiObject == nil { return nil } @@ -1790,17 +2006,17 @@ func flattenFirehose(apiObject 
*kafka.Firehose) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.DeliveryStream; v != nil { - tfMap["delivery_stream"] = aws.StringValue(v) + tfMap["delivery_stream"] = aws.ToString(v) } if v := apiObject.Enabled; v != nil { - tfMap["enabled"] = aws.BoolValue(v) + tfMap["enabled"] = aws.ToBool(v) } return tfMap } -func flattenS3(apiObject *kafka.S3) map[string]interface{} { +func flattenS3(apiObject *types.S3) map[string]interface{} { if apiObject == nil { return nil } @@ -1808,21 +2024,21 @@ func flattenS3(apiObject *kafka.S3) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Bucket; v != nil { - tfMap["bucket"] = aws.StringValue(v) + tfMap["bucket"] = aws.ToString(v) } if v := apiObject.Enabled; v != nil { - tfMap["enabled"] = aws.BoolValue(v) + tfMap["enabled"] = aws.ToBool(v) } if v := apiObject.Prefix; v != nil { - tfMap["prefix"] = aws.StringValue(v) + tfMap["prefix"] = aws.ToString(v) } return tfMap } -func flattenOpenMonitoring(apiObject *kafka.OpenMonitoring) map[string]interface{} { +func flattenOpenMonitoring(apiObject *types.OpenMonitoring) map[string]interface{} { if apiObject == nil { return nil } @@ -1836,7 +2052,7 @@ func flattenOpenMonitoring(apiObject *kafka.OpenMonitoring) map[string]interface return tfMap } -func flattenPrometheus(apiObject *kafka.Prometheus) map[string]interface{} { +func flattenPrometheus(apiObject *types.Prometheus) map[string]interface{} { if apiObject == nil { return nil } @@ -1854,7 +2070,7 @@ func flattenPrometheus(apiObject *kafka.Prometheus) map[string]interface{} { return tfMap } -func flattenJmxExporter(apiObject *kafka.JmxExporter) map[string]interface{} { +func flattenJmxExporter(apiObject *types.JmxExporter) map[string]interface{} { if apiObject == nil { return nil } @@ -1862,13 +2078,13 @@ func flattenJmxExporter(apiObject *kafka.JmxExporter) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.EnabledInBroker; v != nil { - tfMap["enabled_in_broker"] = aws.BoolValue(v) + tfMap["enabled_in_broker"] = aws.ToBool(v) } return tfMap } -func flattenNodeExporter(apiObject *kafka.NodeExporter) map[string]interface{} { +func flattenNodeExporter(apiObject *types.NodeExporter) map[string]interface{} { if apiObject == nil { return nil } @@ -1876,36 +2092,8 @@ func flattenNodeExporter(apiObject *kafka.NodeExporter) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.EnabledInBroker; v != nil { - tfMap["enabled_in_broker"] = aws.BoolValue(v) + tfMap["enabled_in_broker"] = aws.ToBool(v) } return tfMap } - -func refreshClusterVersion(ctx context.Context, d *schema.ResourceData, meta interface{}) error { - conn := meta.(*conns.AWSClient).KafkaConn(ctx) - - cluster, err := FindClusterByARN(ctx, conn, d.Id()) - - if err != nil { - return fmt.Errorf("reading MSK Cluster (%s): %w", d.Id(), err) - } - - d.Set("current_version", cluster.CurrentVersion) - - return nil -} - -func clusterUUIDFromARN(clusterARN string) (string, error) { - parsedARN, err := arn.Parse(clusterARN) - if err != nil { - return "", err - } - - // arn:${Partition}:kafka:${Region}:${Account}:cluster/${ClusterName}/${Uuid} - parts := strings.Split(parsedARN.Resource, "/") - if len(parts) != 3 || parts[0] != "cluster" || parts[1] == "" || parts[2] == "" { - return "", fmt.Errorf("invalid MSK Cluster ARN (%s)", clusterARN) - } - return parts[2], nil -} diff --git a/internal/service/kafka/cluster_data_source.go b/internal/service/kafka/cluster_data_source.go index bbb43cb8d73..764e82d719f 
100644
--- a/internal/service/kafka/cluster_data_source.go
+++ b/internal/service/kafka/cluster_data_source.go
@@ -6,18 +6,21 @@ package kafka
 import (
    "context"

-   "github.com/aws/aws-sdk-go/aws"
-   "github.com/aws/aws-sdk-go/service/kafka"
+   "github.com/aws/aws-sdk-go-v2/aws"
+   "github.com/aws/aws-sdk-go-v2/service/kafka"
+   "github.com/aws/aws-sdk-go-v2/service/kafka/types"
    "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
    "github.com/hashicorp/terraform-provider-aws/internal/conns"
    "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
+   tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
    tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
+   "github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 )

-// @SDKDataSource("aws_msk_cluster")
-func DataSourceCluster() *schema.Resource {
+// @SDKDataSource("aws_msk_cluster", name="Cluster")
+func dataSourceCluster() *schema.Resource {
    return &schema.Resource{
        ReadWithoutTimeout: dataSourceClusterRead,

@@ -86,72 +89,79 @@ func DataSourceCluster() *schema.Resource {
 func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
    var diags diag.Diagnostics
-   conn := meta.(*conns.AWSClient).KafkaConn(ctx)
+   conn := meta.(*conns.AWSClient).KafkaClient(ctx)
    ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig

    clusterName := d.Get("cluster_name").(string)
    input := &kafka.ListClustersInput{
        ClusterNameFilter: aws.String(clusterName),
    }
-   var cluster *kafka.ClusterInfo
-
-   err := conn.ListClustersPagesWithContext(ctx, input, func(page *kafka.ListClustersOutput, lastPage bool) bool {
-       if page == nil {
-           return !lastPage
-       }
-
-       for _, clusterInfo := range page.ClusterInfoList {
-           if aws.StringValue(clusterInfo.ClusterName) == clusterName {
-               cluster = clusterInfo
-
-               return false
-           }
-       }
-
-       return !lastPage
+   cluster, err := findCluster(ctx, conn, input, func(v *types.ClusterInfo) bool {
+       return aws.ToString(v.ClusterName) == clusterName
    })

    if err != nil {
-       return sdkdiag.AppendErrorf(diags, "listing MSK Clusters: %s", err)
+       return sdkdiag.AppendErrorf(diags, "reading MSK Cluster (%s): %s", clusterName, err)
    }

-   if cluster == nil {
-       return sdkdiag.AppendErrorf(diags, "reading MSK Cluster (%s): no results found", clusterName)
-   }
-
-   bootstrapBrokersInput := &kafka.GetBootstrapBrokersInput{
-       ClusterArn: cluster.ClusterArn,
-   }
-
-   bootstrapBrokersOutput, err := conn.GetBootstrapBrokersWithContext(ctx, bootstrapBrokersInput)
+   clusterARN := aws.ToString(cluster.ClusterArn)
+   bootstrapBrokersOutput, err := findBootstrapBrokersByARN(ctx, conn, clusterARN)

    if err != nil {
-       return sdkdiag.AppendErrorf(diags, "reading MSK Cluster (%s) bootstrap brokers: %s", aws.StringValue(cluster.ClusterArn), err)
+       return sdkdiag.AppendErrorf(diags, "reading MSK Cluster (%s) bootstrap brokers: %s", clusterARN, err)
    }

-   clusterARN := aws.StringValue(cluster.ClusterArn)
+   d.SetId(clusterARN)
    d.Set("arn", clusterARN)
-   d.Set("bootstrap_brokers", SortEndpointsString(aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerString)))
-   d.Set("bootstrap_brokers_public_sasl_iam", SortEndpointsString(aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerStringPublicSaslIam)))
-   d.Set("bootstrap_brokers_public_sasl_scram", SortEndpointsString(aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerStringPublicSaslScram)))
-   d.Set("bootstrap_brokers_public_tls", SortEndpointsString(aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerStringPublicTls)))
-   d.Set("bootstrap_brokers_sasl_iam", SortEndpointsString(aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerStringSaslIam)))
-   d.Set("bootstrap_brokers_sasl_scram", SortEndpointsString(aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerStringSaslScram)))
-   d.Set("bootstrap_brokers_tls", SortEndpointsString(aws.StringValue(bootstrapBrokersOutput.BootstrapBrokerStringTls)))
+   d.Set("bootstrap_brokers", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerString)))
+   d.Set("bootstrap_brokers_public_sasl_iam", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringPublicSaslIam)))
+   d.Set("bootstrap_brokers_public_sasl_scram", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringPublicSaslScram)))
+   d.Set("bootstrap_brokers_public_tls", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringPublicTls)))
+   d.Set("bootstrap_brokers_sasl_iam", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringSaslIam)))
+   d.Set("bootstrap_brokers_sasl_scram", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringSaslScram)))
+   d.Set("bootstrap_brokers_tls", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringTls)))
    d.Set("cluster_name", cluster.ClusterName)
    clusterUUID, _ := clusterUUIDFromARN(clusterARN)
    d.Set("cluster_uuid", clusterUUID)
    d.Set("kafka_version", cluster.CurrentBrokerSoftwareInfo.KafkaVersion)
    d.Set("number_of_broker_nodes", cluster.NumberOfBrokerNodes)
+   d.Set("zookeeper_connect_string", SortEndpointsString(aws.ToString(cluster.ZookeeperConnectString)))
+   d.Set("zookeeper_connect_string_tls", SortEndpointsString(aws.ToString(cluster.ZookeeperConnectStringTls)))

    if err := d.Set("tags", KeyValueTags(ctx, cluster.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {
        return sdkdiag.AppendErrorf(diags, "setting tags: %s", err)
    }

-   d.Set("zookeeper_connect_string", SortEndpointsString(aws.StringValue(cluster.ZookeeperConnectString)))
-   d.Set("zookeeper_connect_string_tls", SortEndpointsString(aws.StringValue(cluster.ZookeeperConnectStringTls)))
+   return diags
+}

-   d.SetId(aws.StringValue(cluster.ClusterArn))
+func findCluster(ctx context.Context, conn *kafka.Client, input *kafka.ListClustersInput, filter tfslices.Predicate[*types.ClusterInfo]) (*types.ClusterInfo, error) {
+   output, err := findClusters(ctx, conn, input, filter)

-   return diags
+   if err != nil {
+       return nil, err
+   }
+
+   return tfresource.AssertFirstValueResult(output)
+}
+
+func findClusters(ctx context.Context, conn *kafka.Client, input *kafka.ListClustersInput, filter tfslices.Predicate[*types.ClusterInfo]) ([]types.ClusterInfo, error) {
+   var output []types.ClusterInfo
+
+   pages := kafka.NewListClustersPaginator(conn, input)
+   for pages.HasMorePages() {
+       page, err := pages.NextPage(ctx)
+
+       if err != nil {
+           return nil, err
+       }
+
+       for _, v := range page.ClusterInfoList {
+           if filter(&v) {
+               output = append(output, v)
+           }
+       }
+   }
+
+   return output, nil
 }
diff --git a/internal/service/kafka/cluster_data_source_test.go b/internal/service/kafka/cluster_data_source_test.go
index 9126cf01d06..35f08ef22c9 100644
--- a/internal/service/kafka/cluster_data_source_test.go
+++ b/internal/service/kafka/cluster_data_source_test.go
@@ -7,10 +7,10 @@ import (
    "fmt"
    "testing"

-   "github.com/aws/aws-sdk-go/service/kafka"
    sdkacctest
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccKafkaClusterDataSource_basic(t *testing.T) { @@ -21,9 +21,8 @@ func TestAccKafkaClusterDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccClusterDataSourceConfig_basic(rName), diff --git a/internal/service/kafka/cluster_policy.go b/internal/service/kafka/cluster_policy.go index 001522f4c42..49c45052daa 100644 --- a/internal/service/kafka/cluster_policy.go +++ b/internal/service/kafka/cluster_policy.go @@ -23,7 +23,7 @@ import ( ) // @SDKResource("aws_msk_cluster_policy", name="Cluster Policy") -func ResourceClusterPolicy() *schema.Resource { +func resourceClusterPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceClusterPolicyPut, ReadWithoutTimeout: resourceClusterPolicyRead, @@ -94,7 +94,7 @@ func resourceClusterPolicyRead(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).KafkaClient(ctx) - output, err := FindClusterPolicyByARN(ctx, conn, d.Id()) + output, err := findClusterPolicyByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] MSK Cluster Policy (%s) not found, removing from state", d.Id()) @@ -142,7 +142,7 @@ func resourceClusterPolicyDelete(ctx context.Context, d *schema.ResourceData, me return diags } -func FindClusterPolicyByARN(ctx context.Context, conn *kafka.Client, id string) (*kafka.GetClusterPolicyOutput, error) { +func findClusterPolicyByARN(ctx context.Context, conn *kafka.Client, id string) (*kafka.GetClusterPolicyOutput, error) { in := &kafka.GetClusterPolicyInput{ ClusterArn: aws.String(id), } diff --git a/internal/service/kafka/cluster_test.go b/internal/service/kafka/cluster_test.go index d59e4ea5452..353477925ba 100644 --- a/internal/service/kafka/cluster_test.go +++ b/internal/service/kafka/cluster_test.go @@ -12,9 +12,10 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/aws/aws-sdk-go/service/acmpca" - "github.com/aws/aws-sdk-go/service/kafka" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,6 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfkafka "github.com/hashicorp/terraform-provider-aws/internal/service/kafka" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) const ( @@ -50,13 +52,13 @@ var ( func TestAccKafkaCluster_basic(t *testing.T) { ctx := acctest.Context(t) - var cluster kafka.ClusterInfo + var cluster types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ 
PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -76,7 +78,7 @@ func TestAccKafkaCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "bootstrap_brokers_vpc_connectivity_sasl_scram", ""), resource.TestCheckResourceAttr(resourceName, "bootstrap_brokers_vpc_connectivity_tls", ""), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", "1"), - resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.az_distribution", kafka.BrokerAZDistributionDefault), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.az_distribution", string(types.BrokerAZDistributionDefault)), resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.client_subnets.#", "3"), resource.TestCheckTypeSetElemAttrPair(resourceName, "broker_node_group_info.0.client_subnets.*", "aws_subnet.test.0", "id"), resource.TestCheckTypeSetElemAttrPair(resourceName, "broker_node_group_info.0.client_subnets.*", "aws_subnet.test.1", "id"), @@ -107,7 +109,7 @@ func TestAccKafkaCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.#", "1"), resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.0.client_broker", "TLS"), resource.TestCheckResourceAttr(resourceName, "encryption_info.0.encryption_in_transit.0.in_cluster", "true"), - resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", kafka.EnhancedMonitoringDefault), + resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", string(types.EnhancedMonitoringDefault)), resource.TestCheckResourceAttr(resourceName, "kafka_version", "2.8.1"), resource.TestCheckResourceAttr(resourceName, "number_of_broker_nodes", "3"), resource.TestCheckResourceAttrSet(resourceName, "storage_mode"), @@ -131,13 +133,13 @@ func TestAccKafkaCluster_basic(t *testing.T) { func TestAccKafkaCluster_disappears(t *testing.T) { ctx := acctest.Context(t) - var cluster kafka.ClusterInfo + var cluster types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -155,13 +157,13 @@ func TestAccKafkaCluster_disappears(t *testing.T) { func TestAccKafkaCluster_tags(t *testing.T) { ctx := acctest.Context(t) - var cluster kafka.ClusterInfo + var cluster types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -204,7 +206,7 @@ func TestAccKafkaCluster_tags(t *testing.T) { func 
TestAccKafkaCluster_BrokerNodeGroupInfo_storageInfo(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" original_volume_size := 11 @@ -212,7 +214,7 @@ func TestAccKafkaCluster_BrokerNodeGroupInfo_storageInfo(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -257,13 +259,13 @@ func TestAccKafkaCluster_BrokerNodeGroupInfo_storageInfo(t *testing.T) { func TestAccKafkaCluster_BrokerNodeGroupInfo_instanceType(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -300,13 +302,13 @@ func TestAccKafkaCluster_BrokerNodeGroupInfo_instanceType(t *testing.T) { func TestAccKafkaCluster_BrokerNodeGroupInfo_publicAccessSASLIAM(t *testing.T) { ctx := acctest.Context(t) - var cluster1 kafka.ClusterInfo + var cluster1 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -388,13 +390,13 @@ func TestAccKafkaCluster_BrokerNodeGroupInfo_publicAccessSASLIAM(t *testing.T) { func TestAccKafkaCluster_BrokerNodeGroupInfo_vpcConnectivity(t *testing.T) { ctx := acctest.Context(t) - var cluster1 kafka.ClusterInfo + var cluster1 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -423,13 +425,13 @@ func TestAccKafkaCluster_BrokerNodeGroupInfo_vpcConnectivity(t *testing.T) { func TestAccKafkaCluster_ClientAuthenticationSASL_scram(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + 
ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -488,13 +490,13 @@ func TestAccKafkaCluster_ClientAuthenticationSASL_scram(t *testing.T) { func TestAccKafkaCluster_ClientAuthenticationSASL_iam(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -553,7 +555,7 @@ func TestAccKafkaCluster_ClientAuthenticationSASL_iam(t *testing.T) { func TestAccKafkaCluster_ClientAuthenticationTLS_certificateAuthorityARNs(t *testing.T) { ctx := acctest.Context(t) - var cluster1 kafka.ClusterInfo + var cluster1 types.ClusterInfo var ca acmpca.CertificateAuthority rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" @@ -562,7 +564,7 @@ func TestAccKafkaCluster_ClientAuthenticationTLS_certificateAuthorityARNs(t *tes resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -614,7 +616,7 @@ func TestAccKafkaCluster_ClientAuthenticationTLS_certificateAuthorityARNs(t *tes func TestAccKafkaCluster_ClientAuthenticationTLS_initiallyNoAuthentication(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo var ca acmpca.CertificateAuthority rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" @@ -623,7 +625,7 @@ func TestAccKafkaCluster_ClientAuthenticationTLS_initiallyNoAuthentication(t *te resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -687,7 +689,7 @@ func TestAccKafkaCluster_ClientAuthenticationTLS_initiallyNoAuthentication(t *te func TestAccKafkaCluster_Info_revision(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) configurationResourceName := "aws_msk_configuration.test1" configurationResourceName2 := "aws_msk_configuration.test2" @@ -695,7 +697,7 @@ func TestAccKafkaCluster_Info_revision(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -732,13 +734,13 @@ func TestAccKafkaCluster_Info_revision(t *testing.T) { func TestAccKafkaCluster_EncryptionInfo_encryptionAtRestKMSKeyARN(t *testing.T) { ctx := acctest.Context(t) - var cluster kafka.ClusterInfo + var cluster types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -763,13 +765,13 @@ func TestAccKafkaCluster_EncryptionInfo_encryptionAtRestKMSKeyARN(t *testing.T) func TestAccKafkaCluster_EncryptionInfoEncryptionInTransit_clientBroker(t *testing.T) { ctx := acctest.Context(t) - var cluster1 kafka.ClusterInfo + var cluster1 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -800,13 +802,13 @@ func TestAccKafkaCluster_EncryptionInfoEncryptionInTransit_clientBroker(t *testi func TestAccKafkaCluster_EncryptionInfoEncryptionInTransit_inCluster(t *testing.T) { ctx := acctest.Context(t) - var cluster1 kafka.ClusterInfo + var cluster1 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -833,13 +835,13 @@ func TestAccKafkaCluster_EncryptionInfoEncryptionInTransit_inCluster(t *testing. 
func TestAccKafkaCluster_enhancedMonitoring(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -847,7 +849,7 @@ func TestAccKafkaCluster_enhancedMonitoring(t *testing.T) { Config: testAccClusterConfig_enhancedMonitoring(rName, "PER_BROKER"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster1), - resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", kafka.EnhancedMonitoringPerBroker), + resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", string(types.EnhancedMonitoringPerBroker)), ), }, { @@ -863,7 +865,7 @@ func TestAccKafkaCluster_enhancedMonitoring(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster2), testAccCheckClusterNotRecreated(&cluster1, &cluster2), - resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", kafka.EnhancedMonitoringPerTopicPerBroker), + resource.TestCheckResourceAttr(resourceName, "enhanced_monitoring", string(types.EnhancedMonitoringPerTopicPerBroker)), ), }, }, @@ -872,13 +874,13 @@ func TestAccKafkaCluster_enhancedMonitoring(t *testing.T) { func TestAccKafkaCluster_numberOfBrokerNodes(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -929,13 +931,13 @@ func TestAccKafkaCluster_numberOfBrokerNodes(t *testing.T) { func TestAccKafkaCluster_openMonitoring(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -978,13 +980,13 @@ func TestAccKafkaCluster_openMonitoring(t *testing.T) { func TestAccKafkaCluster_storageMode(t *testing.T) { ctx := acctest.Context(t) - var cluster kafka.ClusterInfo + var cluster types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: 
acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -1002,13 +1004,13 @@ func TestAccKafkaCluster_storageMode(t *testing.T) { func TestAccKafkaCluster_loggingInfo(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -1055,13 +1057,13 @@ func TestAccKafkaCluster_loggingInfo(t *testing.T) { func TestAccKafkaCluster_kafkaVersionUpgrade(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -1094,13 +1096,13 @@ func TestAccKafkaCluster_kafkaVersionUpgrade(t *testing.T) { func TestAccKafkaCluster_kafkaVersionDowngrade(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -1143,7 +1145,7 @@ func TestAccKafkaCluster_kafkaVersionDowngrade(t *testing.T) { func TestAccKafkaCluster_kafkaVersionUpgradeWithInfo(t *testing.T) { ctx := acctest.Context(t) - var cluster1, cluster2 kafka.ClusterInfo + var cluster1, cluster2 types.ClusterInfo rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) configurationResourceName1 := "aws_msk_configuration.config1" configurationResourceName2 := "aws_msk_configuration.config2" @@ -1151,7 +1153,7 @@ func TestAccKafkaCluster_kafkaVersionUpgradeWithInfo(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -1211,7 +1213,7 @@ func testAccCheckResourceAttrIsSortedCSV(resourceName, attributeName string) res func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_msk_cluster" { @@ -1235,18 +1237,14 @@ func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckClusterExists(ctx context.Context, n string, v *kafka.ClusterInfo) resource.TestCheckFunc { +func testAccCheckClusterExists(ctx context.Context, n string, v *types.ClusterInfo) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No MSK Cluster ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) output, err := tfkafka.FindClusterByARN(ctx, conn, rs.Primary.ID) @@ -1260,20 +1258,20 @@ func testAccCheckClusterExists(ctx context.Context, n string, v *kafka.ClusterIn } } -func testAccCheckClusterNotRecreated(i, j *kafka.ClusterInfo) resource.TestCheckFunc { +func testAccCheckClusterNotRecreated(i, j *types.ClusterInfo) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.ClusterArn) != aws.StringValue(j.ClusterArn) { - return fmt.Errorf("MSK Cluster (%s) recreated", aws.StringValue(i.ClusterArn)) + if aws.ToString(i.ClusterArn) != aws.ToString(j.ClusterArn) { + return fmt.Errorf("MSK Cluster (%s) recreated", aws.ToString(i.ClusterArn)) } return nil } } -func testAccCheckClusterRecreated(i, j *kafka.ClusterInfo) resource.TestCheckFunc { +func testAccCheckClusterRecreated(i, j *types.ClusterInfo) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.ClusterArn) == aws.StringValue(j.ClusterArn) { - return fmt.Errorf("MSK Cluster (%s) was not recreated", aws.StringValue(i.ClusterArn)) + if aws.ToString(i.ClusterArn) == aws.ToString(j.ClusterArn) { + return fmt.Errorf("MSK Cluster (%s) was not recreated", aws.ToString(i.ClusterArn)) } return nil @@ -1281,11 +1279,11 @@ func testAccCheckClusterRecreated(i, j *kafka.ClusterInfo) resource.TestCheckFun } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) input := &kafka.ListClustersInput{} - _, err := conn.ListClustersWithContext(ctx, input) + _, err := conn.ListClusters(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/kafka/configuration.go b/internal/service/kafka/configuration.go index a5d03fbe211..fe0519dd77a 100644 --- a/internal/service/kafka/configuration.go +++ b/internal/service/kafka/configuration.go @@ -6,20 +6,25 @@ package kafka import ( "context" "log" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_msk_configuration") -func ResourceConfiguration() *schema.Resource { +// @SDKResource("aws_msk_configuration", name="Configuration") +func resourceConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceConfigurationCreate, ReadWithoutTimeout: resourceConfigurationRead, @@ -72,7 +77,7 @@ func ResourceConfiguration() *schema.Resource { func resourceConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) input := &kafka.CreateConfigurationInput{ Name: aws.String(d.Get("name").(string)), @@ -84,71 +89,46 @@ func resourceConfigurationCreate(ctx context.Context, d *schema.ResourceData, me } if v, ok := d.GetOk("kafka_versions"); ok && v.(*schema.Set).Len() > 0 { - input.KafkaVersions = flex.ExpandStringSet(v.(*schema.Set)) + input.KafkaVersions = flex.ExpandStringValueSet(v.(*schema.Set)) } - output, err := conn.CreateConfigurationWithContext(ctx, input) + output, err := conn.CreateConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK Configuration: %s", err) } - d.SetId(aws.StringValue(output.Arn)) + d.SetId(aws.ToString(output.Arn)) return append(diags, resourceConfigurationRead(ctx, d, meta)...) } func resourceConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - configurationInput := &kafka.DescribeConfigurationInput{ - Arn: aws.String(d.Id()), - } + configurationOutput, err := findConfigurationByARN(ctx, conn, d.Id()) - configurationOutput, err := conn.DescribeConfigurationWithContext(ctx, configurationInput) - - if tfawserr.ErrMessageContains(err, kafka.ErrCodeBadRequestException, "Configuration ARN does not exist") { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] MSK Configuration (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "describing MSK Configuration (%s): %s", d.Id(), err) - } - - if configurationOutput == nil { - return sdkdiag.AppendErrorf(diags, "describing MSK Configuration (%s): missing result", d.Id()) - } - - if configurationOutput.LatestRevision == nil { - return sdkdiag.AppendErrorf(diags, "describing MSK Configuration (%s): missing latest revision", d.Id()) + return sdkdiag.AppendErrorf(diags, "reading MSK Configuration (%s): %s", d.Id(), err) } - revision := configurationOutput.LatestRevision.Revision - revisionInput := &kafka.DescribeConfigurationRevisionInput{ - Arn: aws.String(d.Id()), - Revision: revision, - } - - revisionOutput, err := conn.DescribeConfigurationRevisionWithContext(ctx, revisionInput) + revision := aws.ToInt64(configurationOutput.LatestRevision.Revision) + revisionOutput, err := findConfigurationRevisionByTwoPartKey(ctx, conn, d.Id(), revision) if err != nil { - return sdkdiag.AppendErrorf(diags, "describing MSK Configuration (%s) Revision (%d): %s", d.Id(), aws.Int64Value(revision), err) - } - - if revisionOutput == nil { - return sdkdiag.AppendErrorf(diags, 
"describing MSK Configuration (%s) Revision (%d): missing result", d.Id(), aws.Int64Value(revision)) + return sdkdiag.AppendErrorf(diags, "reading MSK Configuration (%s) revision (%d): %s", d.Id(), revision, err) } d.Set("arn", configurationOutput.Arn) d.Set("description", revisionOutput.Description) - - if err := d.Set("kafka_versions", aws.StringValueSlice(configurationOutput.KafkaVersions)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting kafka_versions: %s", err) - } - + d.Set("kafka_versions", configurationOutput.KafkaVersions) d.Set("latest_revision", revision) d.Set("name", configurationOutput.Name) d.Set("server_properties", string(revisionOutput.ServerProperties)) @@ -158,7 +138,7 @@ func resourceConfigurationRead(ctx context.Context, d *schema.ResourceData, meta func resourceConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) input := &kafka.UpdateConfigurationInput{ Arn: aws.String(d.Id()), @@ -169,7 +149,7 @@ func resourceConfigurationUpdate(ctx context.Context, d *schema.ResourceData, me input.Description = aws.String(v.(string)) } - _, err := conn.UpdateConfigurationWithContext(ctx, input) + _, err := conn.UpdateConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating MSK Configuration (%s): %s", d.Id(), err) @@ -180,14 +160,16 @@ func resourceConfigurationUpdate(ctx context.Context, d *schema.ResourceData, me func resourceConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - input := &kafka.DeleteConfigurationInput{ + log.Printf("[DEBUG] Deleting MSK Configuration: %s", d.Id()) + _, err := conn.DeleteConfiguration(ctx, &kafka.DeleteConfigurationInput{ Arn: aws.String(d.Id()), - } + }) - log.Printf("[DEBUG] Deleting MSK Configuration: %s", d.Id()) - _, err := conn.DeleteConfigurationWithContext(ctx, input) + if errs.IsAErrorMessageContains[*types.BadRequestException](err, "Configuration ARN does not exist") { + return diags + } if err != nil { return sdkdiag.AppendErrorf(diags, "deleting MSK Configuration (%s): %s", d.Id(), err) @@ -199,3 +181,83 @@ func resourceConfigurationDelete(ctx context.Context, d *schema.ResourceData, me return diags } + +func findConfigurationByARN(ctx context.Context, conn *kafka.Client, arn string) (*kafka.DescribeConfigurationOutput, error) { + input := &kafka.DescribeConfigurationInput{ + Arn: aws.String(arn), + } + + output, err := conn.DescribeConfiguration(ctx, input) + + if errs.IsAErrorMessageContains[*types.BadRequestException](err, "Configuration ARN does not exist") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.LatestRevision == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func findConfigurationRevisionByTwoPartKey(ctx context.Context, conn *kafka.Client, arn string, revision int64) (*kafka.DescribeConfigurationRevisionOutput, error) { + input := &kafka.DescribeConfigurationRevisionInput{ + Arn: aws.String(arn), + Revision: aws.Int64(revision), + } + + output, err := conn.DescribeConfigurationRevision(ctx, input) + + if err != nil { + return nil, err + } + + if output == nil { + return nil, 
tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusConfigurationState(ctx context.Context, conn *kafka.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findConfigurationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil + } +} + +func waitConfigurationDeleted(ctx context.Context, conn *kafka.Client, arn string) (*kafka.DescribeConfigurationOutput, error) { + const ( + timeout = 5 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.ConfigurationStateDeleting), + Target: []string{}, + Refresh: statusConfigurationState(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafka.DescribeConfigurationOutput); ok { + return output, err + } + + return nil, err +} diff --git a/internal/service/kafka/configuration_data_source.go b/internal/service/kafka/configuration_data_source.go index e034d5cb389..157f6f63ea1 100644 --- a/internal/service/kafka/configuration_data_source.go +++ b/internal/service/kafka/configuration_data_source.go @@ -6,16 +6,19 @@ package kafka import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKDataSource("aws_msk_configuration") -func DataSourceConfiguration() *schema.Resource { +// @SDKDataSource("aws_msk_configuration", name="Configuration") +func dataSourceConfiguration() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceConfigurationRead, @@ -53,62 +56,68 @@ func DataSourceConfiguration() *schema.Resource { func dataSourceConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - listConfigurationsInput := &kafka.ListConfigurationsInput{} - - var configuration *kafka.Configuration - err := conn.ListConfigurationsPagesWithContext(ctx, listConfigurationsInput, func(page *kafka.ListConfigurationsOutput, lastPage bool) bool { - for _, config := range page.Configurations { - if aws.StringValue(config.Name) == d.Get("name").(string) { - configuration = config - break - } - } - - return !lastPage + input := &kafka.ListConfigurationsInput{} + configuration, err := findConfiguration(ctx, conn, input, func(v *types.Configuration) bool { + return aws.ToString(v.Name) == d.Get("name").(string) }) if err != nil { - return sdkdiag.AppendErrorf(diags, "listing MSK Configurations: %s", err) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Configuration", err)) } - if configuration == nil { - return sdkdiag.AppendErrorf(diags, "reading MSK Configuration: no results found") - } + configurationARN := aws.ToString(configuration.Arn) + revision := 
aws.ToInt64(configuration.LatestRevision.Revision) - if configuration.LatestRevision == nil { - return sdkdiag.AppendErrorf(diags, "describing MSK Configuration (%s): missing latest revision", d.Id()) - } + revisionOutput, err := findConfigurationRevisionByTwoPartKey(ctx, conn, configurationARN, revision) - revision := configuration.LatestRevision.Revision - revisionInput := &kafka.DescribeConfigurationRevisionInput{ - Arn: configuration.Arn, - Revision: revision, + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading MSK Configuration (%s) revision (%d): %s", d.Id(), revision, err) } - revisionOutput, err := conn.DescribeConfigurationRevisionWithContext(ctx, revisionInput) + d.SetId(configurationARN) + d.Set("arn", configurationARN) + d.Set("description", configuration.Description) + d.Set("kafka_versions", configuration.KafkaVersions) + d.Set("latest_revision", revision) + d.Set("name", configuration.Name) + d.Set("server_properties", string(revisionOutput.ServerProperties)) + + return diags +} + +func findConfiguration(ctx context.Context, conn *kafka.Client, input *kafka.ListConfigurationsInput, filter tfslices.Predicate[*types.Configuration]) (*types.Configuration, error) { + output, err := findConfigurations(ctx, conn, input, filter) if err != nil { - return sdkdiag.AppendErrorf(diags, "describing MSK Configuration (%s) Revision (%d): %s", d.Id(), aws.Int64Value(revision), err) + return nil, err } - if revisionOutput == nil { - return sdkdiag.AppendErrorf(diags, "describing MSK Configuration (%s) Revision (%d): missing result", d.Id(), aws.Int64Value(revision)) - } + return tfresource.AssertSingleValueResult(output) +} - d.Set("arn", configuration.Arn) - d.Set("description", configuration.Description) +func findConfigurations(ctx context.Context, conn *kafka.Client, input *kafka.ListConfigurationsInput, filter tfslices.Predicate[*types.Configuration]) ([]types.Configuration, error) { + var output []types.Configuration - if err := d.Set("kafka_versions", aws.StringValueSlice(configuration.KafkaVersions)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting kafka_versions: %s", err) - } + pages := kafka.NewListConfigurationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - d.Set("latest_revision", revision) - d.Set("name", configuration.Name) - d.Set("server_properties", string(revisionOutput.ServerProperties)) + if err != nil { + return nil, err + } - d.SetId(aws.StringValue(configuration.Arn)) + for _, v := range page.Configurations { + if v.LatestRevision == nil { + continue + } - return diags + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil } diff --git a/internal/service/kafka/configuration_data_source_test.go b/internal/service/kafka/configuration_data_source_test.go index 53cdf439047..7a8b8536eb5 100644 --- a/internal/service/kafka/configuration_data_source_test.go +++ b/internal/service/kafka/configuration_data_source_test.go @@ -7,10 +7,10 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/kafka" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccKafkaConfigurationDataSource_name(t *testing.T) { @@ -21,7 +21,7 @@ func TestAccKafkaConfigurationDataSource_name(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
- ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckConfigurationDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/kafka/configuration_test.go b/internal/service/kafka/configuration_test.go index dfb4c816a2a..bddbfe1be58 100644 --- a/internal/service/kafka/configuration_test.go +++ b/internal/service/kafka/configuration_test.go @@ -9,15 +9,15 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/service/kafka" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfkafka "github.com/hashicorp/terraform-provider-aws/internal/service/kafka" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccKafkaConfiguration_basic(t *testing.T) { @@ -28,7 +28,7 @@ func TestAccKafkaConfiguration_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -61,7 +61,7 @@ func TestAccKafkaConfiguration_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -85,7 +85,7 @@ func TestAccKafkaConfiguration_description(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -121,7 +121,7 @@ func TestAccKafkaConfiguration_kafkaVersions(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -153,7 +153,7 @@ func TestAccKafkaConfiguration_serverProperties(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckConfigurationDestroy(ctx), Steps: []resource.TestStep{ @@ -183,20 +183,16 @@ func TestAccKafkaConfiguration_serverProperties(t *testing.T) { func testAccCheckConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_msk_configuration" { continue } - input := &kafka.DescribeConfigurationInput{ - Arn: aws.String(rs.Primary.ID), - } - - output, err := conn.DescribeConfigurationWithContext(ctx, input) + _, err := tfkafka.FindConfigurationByARN(ctx, conn, rs.Primary.ID) - if tfawserr.ErrMessageContains(err, kafka.ErrCodeBadRequestException, "Configuration ARN does not exist") { + if tfresource.NotFound(err) { continue } @@ -204,39 +200,29 @@ func testAccCheckConfigurationDestroy(ctx context.Context) resource.TestCheckFun return err } - if output != nil { - return fmt.Errorf("MSK Configuration (%s) still exists", rs.Primary.ID) - } + return fmt.Errorf("MSK Configuration %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckConfigurationExists(ctx context.Context, resourceName string, configuration *kafka.DescribeConfigurationOutput) resource.TestCheckFunc { +func testAccCheckConfigurationExists(ctx context.Context, n string, v *kafka.DescribeConfigurationOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("Resource ID not set: %s", resourceName) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) - - input := &kafka.DescribeConfigurationInput{ - Arn: aws.String(rs.Primary.ID), - } + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) - output, err := conn.DescribeConfigurationWithContext(ctx, input) + output, err := tfkafka.FindConfigurationByARN(ctx, conn, rs.Primary.ID) if err != nil { - return fmt.Errorf("error describing MSK Cluster (%s): %s", rs.Primary.ID, err) + return err } - *configuration = *output + *v = *output return nil } diff --git a/internal/service/kafka/enum.go b/internal/service/kafka/enum.go index 0f9b48dd8b1..83152b779fa 100644 --- a/internal/service/kafka/enum.go +++ b/internal/service/kafka/enum.go @@ -4,20 +4,22 @@ package kafka const ( - ClusterOperationStatePending = "PENDING" - ClusterOperationStateUpdateComplete = "UPDATE_COMPLETE" - ClusterOperationStateUpdateFailed = "UPDATE_FAILED" - ClusterOperationStateUpdateInProgress = "UPDATE_IN_PROGRESS" + clusterOperationStatePending = "PENDING" + clusterOperationStateUpdateComplete = "UPDATE_COMPLETE" + clusterOperationStateUpdateFailed = "UPDATE_FAILED" + clusterOperationStateUpdateInProgress = "UPDATE_IN_PROGRESS" ) +type publicAccessType string + const ( - PublicAccessTypeDisabled = "DISABLED" - PublicAccessTypeServiceProvidedEIPs = "SERVICE_PROVIDED_EIPS" + publicAccessTypeDisabled publicAccessType = "DISABLED" + publicAccessTypeServiceProvidedEIPs publicAccessType = "SERVICE_PROVIDED_EIPS" ) -func PublicAccessType_Values() []string { - return []string{ - PublicAccessTypeDisabled, - PublicAccessTypeServiceProvidedEIPs, +func (publicAccessType) Values() []publicAccessType { + return []publicAccessType{ + publicAccessTypeDisabled, + 
publicAccessTypeServiceProvidedEIPs, } } diff --git a/internal/service/kafka/exports_test.go b/internal/service/kafka/exports_test.go new file mode 100644 index 00000000000..4951e1908c6 --- /dev/null +++ b/internal/service/kafka/exports_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kafka + +// Exports for use in tests only. +var ( + ResourceCluster = resourceCluster + ResourceClusterPolicy = resourceClusterPolicy + ResourceConfiguration = resourceConfiguration + ResourceReplicator = resourceReplicator + ResourceSCRAMSecretAssociation = resourceSCRAMSecretAssociation + ResourceServerlessCluster = resourceServerlessCluster + ResourceVPCConnection = resourceVPCConnection + + FindClusterByARN = findClusterByARN + FindClusterPolicyByARN = findClusterPolicyByARN + FindConfigurationByARN = findConfigurationByARN + FindReplicatorByARN = findReplicatorByARN + FindSCRAMSecretsByClusterARN = findSCRAMSecretsByClusterARN + FindServerlessClusterByARN = findServerlessClusterByARN + FindVPCConnectionByARN = findVPCConnectionByARN +) diff --git a/internal/service/kafka/find.go b/internal/service/kafka/find.go deleted file mode 100644 index 747d9493940..00000000000 --- a/internal/service/kafka/find.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package kafka - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindClusterByARN(ctx context.Context, conn *kafka.Kafka, arn string) (*kafka.ClusterInfo, error) { - input := &kafka.DescribeClusterInput{ - ClusterArn: aws.String(arn), - } - - output, err := conn.DescribeClusterWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.ClusterInfo == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.ClusterInfo, nil -} - -func findClusterV2ByARN(ctx context.Context, conn *kafka.Kafka, arn string) (*kafka.Cluster, error) { - input := &kafka.DescribeClusterV2Input{ - ClusterArn: aws.String(arn), - } - - output, err := conn.DescribeClusterV2WithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.ClusterInfo == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.ClusterInfo, nil -} - -func FindClusterOperationByARN(ctx context.Context, conn *kafka.Kafka, arn string) (*kafka.ClusterOperationInfo, error) { - input := &kafka.DescribeClusterOperationInput{ - ClusterOperationArn: aws.String(arn), - } - - output, err := conn.DescribeClusterOperationWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.ClusterOperationInfo == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.ClusterOperationInfo, nil -} - -func FindConfigurationByARN(ctx 
context.Context, conn *kafka.Kafka, arn string) (*kafka.DescribeConfigurationOutput, error) { - input := &kafka.DescribeConfigurationInput{ - Arn: aws.String(arn), - } - - output, err := conn.DescribeConfigurationWithContext(ctx, input) - - if tfawserr.ErrMessageContains(err, kafka.ErrCodeBadRequestException, "Configuration ARN does not exist") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - -// FindScramSecrets returns the matching MSK Cluster's associated secrets -func FindScramSecrets(ctx context.Context, conn *kafka.Kafka, clusterArn string) ([]*string, error) { - input := &kafka.ListScramSecretsInput{ - ClusterArn: aws.String(clusterArn), - } - - var scramSecrets []*string - err := conn.ListScramSecretsPagesWithContext(ctx, input, func(page *kafka.ListScramSecretsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - scramSecrets = append(scramSecrets, page.SecretArnList...) - return !lastPage - }) - - return scramSecrets, err -} - -func FindServerlessClusterByARN(ctx context.Context, conn *kafka.Kafka, arn string) (*kafka.Cluster, error) { - output, err := findClusterV2ByARN(ctx, conn, arn) - - if err != nil { - return nil, err - } - - if output.Serverless == nil { - return nil, tfresource.NewEmptyResultError(arn) - } - - return output, nil -} diff --git a/internal/service/kafka/generate.go b/internal/service/kafka/generate.go index 1ea2f12076f..4a769ee01ee 100644 --- a/internal/service/kafka/generate.go +++ b/internal/service/kafka/generate.go @@ -1,8 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ServiceTagsMap -UpdateTags -//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -TagsFunc=TagsV2 -KeyValueTagsFunc=keyValueTagsV2 -GetTagsInFunc=getTagsInV2 -SetTagsOutFunc=setTagsOutV2 -ServiceTagsMap -KVTValues -SkipAWSImp -SkipTypesImp -- tagsv2_gen.go +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ServiceTagsMap -UpdateTags -KVTValues -SkipTypesImp //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file.
diff --git a/internal/service/kafka/kafka_version_data_source.go b/internal/service/kafka/kafka_version_data_source.go index ce764f55e7a..01c5a524ef7 100644 --- a/internal/service/kafka/kafka_version_data_source.go +++ b/internal/service/kafka/kafka_version_data_source.go @@ -6,18 +6,22 @@ package kafka import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKDataSource("aws_msk_kafka_version") -func DataSourceVersion() *schema.Resource { +// @SDKDataSource("aws_msk_kafka_version", name="Kafka Version") +func dataSourceKafkaVersion() *schema.Resource { // nosemgrep:ci.kafka-in-func-name return &schema.Resource{ - ReadWithoutTimeout: dataSourceVersionRead, + ReadWithoutTimeout: dataSourceKafkaVersionRead, Schema: map[string]*schema.Schema{ "preferred_versions": { @@ -40,72 +44,64 @@ func DataSourceVersion() *schema.Resource { } } -func findVersion(preferredVersions []interface{}, versions []*kafka.KafkaVersion) *kafka.KafkaVersion { - var found *kafka.KafkaVersion - - for _, v := range preferredVersions { - preferredVersion, ok := v.(string) - - if !ok { - continue - } - - for _, kafkaVersion := range versions { - if preferredVersion == aws.StringValue(kafkaVersion.Version) { - found = kafkaVersion - - break - } - } +func dataSourceKafkaVersionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { // nosemgrep:ci.kafka-in-func-name + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - if found != nil { - break - } + var preferredVersions []string + if v, ok := d.GetOk("preferred_versions"); ok && len(v.([]interface{})) > 0 { + preferredVersions = flex.ExpandStringValueList(v.([]interface{})) + } else if v, ok := d.GetOk("version"); ok { + preferredVersions = tfslices.Of(v.(string)) } - return found -} + kafkaVersion, err := findKafkaVersion(ctx, conn, preferredVersions) -func dataSourceVersionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) - - var kafkaVersions []*kafka.KafkaVersion + if err != nil { + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Kafka Version", err)) + } - err := conn.ListKafkaVersionsPagesWithContext(ctx, &kafka.ListKafkaVersionsInput{}, func(page *kafka.ListKafkaVersionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + version := aws.ToString(kafkaVersion.Version) + d.SetId(version) + d.Set("status", kafkaVersion.Status) + d.Set("version", version) - kafkaVersions = append(kafkaVersions, page.KafkaVersions...) 
+ return diags +} - return !lastPage - }) +func findKafkaVersion(ctx context.Context, conn *kafka.Client, preferredVersions []string) (*types.KafkaVersion, error) { // nosemgrep:ci.kafka-in-func-name + input := &kafka.ListKafkaVersionsInput{} + output, err := findKafkaVersions(ctx, conn, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "listing Kafka versions: %s", err) + return nil, err } - if len(kafkaVersions) == 0 { - return sdkdiag.AppendErrorf(diags, "no Kafka versions found") + var kafkaVersions []types.KafkaVersion + for _, preferredVersion := range preferredVersions { + for _, kafkaVersion := range output { + if preferredVersion == aws.ToString(kafkaVersion.Version) { + kafkaVersions = append(kafkaVersions, kafkaVersion) + } + } } - var found *kafka.KafkaVersion + return tfresource.AssertFirstValueResult(kafkaVersions) +} - if v, ok := d.GetOk("preferred_versions"); ok { - found = findVersion(v.([]interface{}), kafkaVersions) - } else if v, ok := d.GetOk("version"); ok { - found = findVersion([]interface{}{v}, kafkaVersions) - } +func findKafkaVersions(ctx context.Context, conn *kafka.Client, input *kafka.ListKafkaVersionsInput) ([]types.KafkaVersion, error) { // nosemgrep:ci.kafka-in-func-name + var output []types.KafkaVersion - if found == nil { - return sdkdiag.AppendErrorf(diags, "no Kafka versions match the criteria") - } + pages := kafka.NewListKafkaVersionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - d.SetId(aws.StringValue(found.Version)) + if err != nil { + return nil, err + } - d.Set("status", found.Status) - d.Set("version", found.Version) + output = append(output, page.KafkaVersions...) + } - return diags + return output, nil } diff --git a/internal/service/kafka/kafka_version_data_source_test.go b/internal/service/kafka/kafka_version_data_source_test.go index 3020c1fba75..9194f2457db 100644 --- a/internal/service/kafka/kafka_version_data_source_test.go +++ b/internal/service/kafka/kafka_version_data_source_test.go @@ -8,10 +8,11 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccKafkaKafkaVersionDataSource_basic(t *testing.T) { @@ -21,7 +22,7 @@ func TestAccKafkaKafkaVersionDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccVersionPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: nil, Steps: []resource.TestStep{ @@ -42,7 +43,7 @@ func TestAccKafkaKafkaVersionDataSource_preferred(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccVersionPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: nil, Steps: []resource.TestStep{ @@ -58,11 +59,11 @@ func TestAccKafkaKafkaVersionDataSource_preferred(t *testing.T) { } func testAccVersionPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) input := &kafka.ListKafkaVersionsInput{} - _, err := conn.ListKafkaVersionsWithContext(ctx, input) + _, err := conn.ListKafkaVersions(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/kafka/replicator.go b/internal/service/kafka/replicator.go index 9786b654599..36f4ad381c2 100644 --- a/internal/service/kafka/replicator.go +++ b/internal/service/kafka/replicator.go @@ -5,7 +5,7 @@ package kafka import ( "context" - "errors" + "fmt" "log" "time" @@ -16,9 +16,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -28,7 +28,7 @@ import ( // @SDKResource("aws_msk_replicator", name="Replicator") // @Tags(identifierAttribute="id") -func ResourceReplicator() *schema.Resource { +func resourceReplicator() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicatorCreate, ReadWithoutTimeout: resourceReplicatorRead, @@ -228,39 +228,33 @@ func ResourceReplicator() *schema.Resource { } } -const ( - ResNameReplicator = "Replicator" -) - func resourceReplicatorCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).KafkaClient(ctx) - in := &kafka.CreateReplicatorInput{ - KafkaClusters: expandClusters(d.Get("kafka_cluster").([]interface{})), - ReplicationInfoList: expandReplicationInfoList(d.Get("replication_info_list").([]interface{})), - ReplicatorName: aws.String(d.Get("replicator_name").(string)), + name := d.Get("replicator_name").(string) + input := &kafka.CreateReplicatorInput{ + KafkaClusters: expandKafkaClusters(d.Get("kafka_cluster").([]interface{})), + ReplicationInfoList: expandReplicationInfos(d.Get("replication_info_list").([]interface{})), + ReplicatorName: aws.String(name), ServiceExecutionRoleArn: aws.String(d.Get("service_execution_role_arn").(string)), - Tags: getTagsInV2(ctx), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("description"); ok { - in.Description = aws.String(v.(string)) + input.Description = aws.String(v.(string)) } - out, err := conn.CreateReplicator(ctx, in) - if err != nil { - return create.AppendDiagError(diags, names.Kafka, create.ErrActionCreating, ResNameReplicator, d.Get("replicator_name").(string), err) - } + output, err := conn.CreateReplicator(ctx, input) - if out == nil { - return create.AppendDiagError(diags, names.Kafka, create.ErrActionCreating, ResNameReplicator, d.Get("replicator_name").(string), errors.New("empty output")) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating MSK Replicator (%s): %s", name, err) } - d.SetId(aws.ToString(out.ReplicatorArn)) + d.SetId(aws.ToString(output.ReplicatorArn)) if _, err := waitReplicatorCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return create.AppendDiagError(diags, names.Kafka, create.ErrActionWaitingForCreation, ResNameReplicator, d.Id(), 
err) + return sdkdiag.AppendErrorf(diags, "waiting for MSK Replicator (%s) create: %s", d.Id(), err) } return append(diags, resourceReplicatorRead(ctx, d, meta)...) @@ -271,7 +265,7 @@ func resourceReplicatorRead(ctx context.Context, d *schema.ResourceData, meta in conn := meta.(*conns.AWSClient).KafkaClient(ctx) - out, err := findReplicatorByARN(ctx, conn, d.Id()) + output, err := findReplicatorByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Kafka Replicator (%s) not found, removing from state", d.Id()) @@ -280,34 +274,30 @@ func resourceReplicatorRead(ctx context.Context, d *schema.ResourceData, meta in } if err != nil { - return create.AppendDiagError(diags, names.Kafka, create.ErrActionReading, ResNameReplicator, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading MSK Replicator (%s): %s", d.Id(), err) } - sourceAlias := out.ReplicationInfoList[0].SourceKafkaClusterAlias - targetAlias := out.ReplicationInfoList[0].TargetKafkaClusterAlias - clustersArn := out.KafkaClusters + sourceAlias := aws.ToString(output.ReplicationInfoList[0].SourceKafkaClusterAlias) + targetAlias := aws.ToString(output.ReplicationInfoList[0].TargetKafkaClusterAlias) + var sourceARN, targetARN *string - var sourceARN *string - var targetARN *string - - for _, arn := range clustersArn { - clusterAlias := aws.ToString(arn.KafkaClusterAlias) - if clusterAlias == aws.ToString(sourceAlias) { - sourceARN = arn.AmazonMskCluster.MskClusterArn - } else if clusterAlias == aws.ToString(targetAlias) { - targetARN = arn.AmazonMskCluster.MskClusterArn + for _, cluster := range output.KafkaClusters { + if clusterAlias := aws.ToString(cluster.KafkaClusterAlias); clusterAlias == sourceAlias { + sourceARN = cluster.AmazonMskCluster.MskClusterArn + } else if clusterAlias == targetAlias { + targetARN = cluster.AmazonMskCluster.MskClusterArn } } - d.Set("arn", out.ReplicatorArn) - d.Set("current_version", out.CurrentVersion) - d.Set("replicator_name", out.ReplicatorName) - d.Set("description", out.ReplicatorDescription) - d.Set("service_execution_role_arn", out.ServiceExecutionRoleArn) - d.Set("kafka_cluster", flattenClusters(out.KafkaClusters)) - d.Set("replication_info_list", flattenReplicationInfoList(out.ReplicationInfoList, sourceARN, targetARN)) + d.Set("arn", output.ReplicatorArn) + d.Set("current_version", output.CurrentVersion) + d.Set("description", output.ReplicatorDescription) + d.Set("kafka_cluster", flattenKafkaClusterDescriptions(output.KafkaClusters)) + d.Set("replication_info_list", flattenReplicationInfoDescriptions(output.ReplicationInfoList, sourceARN, targetARN)) + d.Set("replicator_name", output.ReplicatorName) + d.Set("service_execution_role_arn", output.ServiceExecutionRoleArn) - setTagsOutV2(ctx, out.Tags) + setTagsOut(ctx, output.Tags) return diags } @@ -318,35 +308,33 @@ func resourceReplicatorUpdate(ctx context.Context, d *schema.ResourceData, meta conn := meta.(*conns.AWSClient).KafkaClient(ctx) if d.HasChangesExcept("tags", "tags_all") { - in := &kafka.UpdateReplicationInfoInput{ - ReplicatorArn: aws.String(d.Id()), + input := &kafka.UpdateReplicationInfoInput{ CurrentVersion: aws.String(d.Get("current_version").(string)), + ReplicatorArn: aws.String(d.Id()), SourceKafkaClusterArn: aws.String(d.Get("replication_info_list.0.source_kafka_cluster_arn").(string)), TargetKafkaClusterArn: aws.String(d.Get("replication_info_list.0.target_kafka_cluster_arn").(string)), } if d.HasChanges("replication_info_list.0.consumer_group_replication") { if v, ok := 
d.GetOk("replication_info_list.0.consumer_group_replication"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - in.ConsumerGroupReplication = expandConsumerGroupReplicationUpdate(v.([]interface{})[0].(map[string]interface{})) + input.ConsumerGroupReplication = expandConsumerGroupReplicationUpdate(v.([]interface{})[0].(map[string]interface{})) } } if d.HasChanges("replication_info_list.0.topic_replication") { if v, ok := d.GetOk("replication_info_list.0.topic_replication"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - in.TopicReplication = expandTopicReplicationUpdate(v.([]interface{})[0].(map[string]interface{})) + input.TopicReplication = expandTopicReplicationUpdate(v.([]interface{})[0].(map[string]interface{})) } } - log.Printf("[DEBUG] Updating Kafka Replicator (%s): %#v", d.Id(), in) - - out, err := conn.UpdateReplicationInfo(ctx, in) + _, err := conn.UpdateReplicationInfo(ctx, input) if err != nil { - return create.AppendDiagError(diags, names.Kafka, create.ErrActionUpdating, ResNameReplicator, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating MSK Replicator (%s): %s", d.Id(), err) } - if _, err := waitReplicatorUpdated(ctx, conn, aws.ToString(out.ReplicatorArn), d.Timeout(schema.TimeoutUpdate)); err != nil { - return create.AppendDiagError(diags, names.Kafka, create.ErrActionWaitingForUpdate, ResNameReplicator, d.Id(), err) + if _, err := waitReplicatorUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for MSK Replicator (%s) update: %s", d.Id(), err) } } @@ -357,8 +345,7 @@ func resourceReplicatorDelete(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).KafkaClient(ctx) - log.Printf("[INFO] Deleting Kafka Replicator %s", d.Id()) - + log.Printf("[INFO] Deleting MSK Replicator: %s", d.Id()) _, err := conn.DeleteReplicator(ctx, &kafka.DeleteReplicatorInput{ ReplicatorArn: aws.String(d.Id()), }) @@ -366,12 +353,13 @@ func resourceReplicatorDelete(ctx context.Context, d *schema.ResourceData, meta if errs.IsA[*types.NotFoundException](err) { return diags } + if err != nil { - return create.AppendDiagError(diags, names.Kafka, create.ErrActionDeleting, ResNameReplicator, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting MSK Replicator (%s): %s", d.Id(), err) } if _, err := waitReplicatorDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return create.AppendDiagError(diags, names.Kafka, create.ErrActionWaitingForDeletion, ResNameReplicator, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for MSK Replicator (%s) delete: %s", d.Id(), err) } return diags @@ -386,8 +374,12 @@ func waitReplicatorCreated(ctx context.Context, conn *kafka.Client, arn string, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*kafka.DescribeReplicatorOutput); ok { - return out, err + if output, ok := outputRaw.(*kafka.DescribeReplicatorOutput); ok { + if stateInfo := output.StateInfo; stateInfo != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateInfo.Code), aws.ToString(stateInfo.Message))) + } + + return output, err } return nil, err @@ -402,8 +394,12 @@ func waitReplicatorUpdated(ctx context.Context, conn *kafka.Client, arn string, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*kafka.DescribeReplicatorOutput); ok { - return out, err + if output, ok := outputRaw.(*kafka.DescribeReplicatorOutput); ok { 
+ if stateInfo := output.StateInfo; stateInfo != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateInfo.Code), aws.ToString(stateInfo.Message))) + } + + return output, err } return nil, err @@ -418,8 +414,12 @@ func waitReplicatorDeleted(ctx context.Context, conn *kafka.Client, arn string, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*kafka.DescribeReplicatorOutput); ok { - return out, err + if output, ok := outputRaw.(*kafka.DescribeReplicatorOutput); ok { + if stateInfo := output.StateInfo; stateInfo != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateInfo.Code), aws.ToString(stateInfo.Message))) + } + + return output, err } return nil, err @@ -427,7 +427,8 @@ func waitReplicatorDeleted(ctx context.Context, conn *kafka.Client, arn string, func statusReplicator(ctx context.Context, conn *kafka.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findReplicatorByARN(ctx, conn, arn) + output, err := findReplicatorByARN(ctx, conn, arn) + if tfresource.NotFound(err) { return nil, "", nil } @@ -436,21 +437,21 @@ func statusReplicator(ctx context.Context, conn *kafka.Client, arn string) retry return nil, "", err } - return out, string(out.ReplicatorState), nil + return output, string(output.ReplicatorState), nil } } func findReplicatorByARN(ctx context.Context, conn *kafka.Client, arn string) (*kafka.DescribeReplicatorOutput, error) { - in := &kafka.DescribeReplicatorInput{ + input := &kafka.DescribeReplicatorInput{ ReplicatorArn: aws.String(arn), } - out, err := conn.DescribeReplicator(ctx, in) + output, err := conn.DescribeReplicator(ctx, input) if errs.IsA[*types.NotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, - LastRequest: in, + LastRequest: input, } } @@ -458,14 +459,14 @@ func findReplicatorByARN(ctx context.Context, conn *kafka.Client, arn string) (* return nil, err } - if out == nil { - return nil, tfresource.NewEmptyResultError(in) + if output == nil { + return nil, tfresource.NewEmptyResultError(input) } - return out, nil + return output, nil } -func flattenReplicationInfoList(apiObjects []types.ReplicationInfoDescription, sourceCluster, targetCluster *string) []interface{} { +func flattenReplicationInfoDescriptions(apiObjects []types.ReplicationInfoDescription, sourceCluster, targetCluster *string) []interface{} { if len(apiObjects) == 0 { return nil } @@ -473,13 +474,13 @@ func flattenReplicationInfoList(apiObjects []types.ReplicationInfoDescription, s var tfList []interface{} for _, apiObject := range apiObjects { - tfList = append(tfList, flattenReplicationInfo(apiObject, sourceCluster, targetCluster)) + tfList = append(tfList, flattenReplicationInfoDescription(apiObject, sourceCluster, targetCluster)) } return tfList } -func flattenReplicationInfo(apiObject types.ReplicationInfoDescription, sourceCluster, targetCluster *string) map[string]interface{} { +func flattenReplicationInfoDescription(apiObject types.ReplicationInfoDescription, sourceCluster, targetCluster *string) map[string]interface{} { tfMap := map[string]interface{}{} if v := sourceCluster; v != nil { @@ -569,7 +570,7 @@ func flattenTopicReplication(apiObject *types.TopicReplication) map[string]inter return tfMap } -func flattenClusters(apiObjects []types.KafkaClusterDescription) []interface{} { +func flattenKafkaClusterDescriptions(apiObjects []types.KafkaClusterDescription) []interface{} { // nosemgrep:ci.kafka-in-func-name if len(apiObjects) == 0 { 
return nil } @@ -577,27 +578,27 @@ func flattenClusters(apiObjects []types.KafkaClusterDescription) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - tfList = append(tfList, flattenCluster(apiObject)) + tfList = append(tfList, flattenKafkaClusterDescription(apiObject)) } return tfList } -func flattenCluster(apiObject types.KafkaClusterDescription) map[string]interface{} { +func flattenKafkaClusterDescription(apiObject types.KafkaClusterDescription) map[string]interface{} { // nosemgrep:ci.kafka-in-func-name tfMap := map[string]interface{}{} if v := apiObject.AmazonMskCluster; v != nil { - tfMap["amazon_msk_cluster"] = []interface{}{flattenAmazonCluster(v)} + tfMap["amazon_msk_cluster"] = []interface{}{flattenAmazonMSKCluster(v)} } if v := apiObject.VpcConfig; v != nil { - tfMap["vpc_config"] = []interface{}{flattenClusterClientVPCConfig(v)} + tfMap["vpc_config"] = []interface{}{flattenKafkaClusterClientVPCConfig(v)} } return tfMap } -func flattenClusterClientVPCConfig(apiObject *types.KafkaClusterClientVpcConfig) map[string]interface{} { +func flattenKafkaClusterClientVPCConfig(apiObject *types.KafkaClusterClientVpcConfig) map[string]interface{} { // nosemgrep:ci.kafka-in-func-name if apiObject == nil { return nil } @@ -615,7 +616,7 @@ func flattenClusterClientVPCConfig(apiObject *types.KafkaClusterClientVpcConfig) return tfMap } -func flattenAmazonCluster(apiObject *types.AmazonMskCluster) map[string]interface{} { +func flattenAmazonMSKCluster(apiObject *types.AmazonMskCluster) map[string]interface{} { // nosemgrep:ci.msk-in-func-name if apiObject == nil { return nil } @@ -675,7 +676,7 @@ func expandTopicReplicationUpdate(tfMap map[string]interface{}) *types.TopicRepl return apiObject } -func expandReplicationInfoList(tfList []interface{}) []types.ReplicationInfo { +func expandReplicationInfos(tfList []interface{}) []types.ReplicationInfo { if len(tfList) == 0 { return nil } @@ -771,7 +772,7 @@ func expandTopicReplication(tfMap map[string]interface{}) *types.TopicReplicatio return apiObject } -func expandClusters(tfList []interface{}) []types.KafkaCluster { +func expandKafkaClusters(tfList []interface{}) []types.KafkaCluster { // nosemgrep:ci.kafka-in-func-name if len(tfList) == 0 { return nil } @@ -785,7 +786,7 @@ func expandClusters(tfList []interface{}) []types.KafkaCluster { continue } - apiObject := expandCluster(tfMap) + apiObject := expandKafkaCluster(tfMap) apiObjects = append(apiObjects, apiObject) } @@ -793,21 +794,21 @@ func expandClusters(tfList []interface{}) []types.KafkaCluster { return apiObjects } -func expandCluster(tfMap map[string]interface{}) types.KafkaCluster { +func expandKafkaCluster(tfMap map[string]interface{}) types.KafkaCluster { // nosemgrep:ci.kafka-in-func-name apiObject := types.KafkaCluster{} if v, ok := tfMap["vpc_config"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.VpcConfig = expandClusterClientVPCConfig(v[0].(map[string]interface{})) + apiObject.VpcConfig = expandKafkaClusterClientVPCConfig(v[0].(map[string]interface{})) } if v, ok := tfMap["amazon_msk_cluster"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - apiObject.AmazonMskCluster = expandAmazonCluster(v[0].(map[string]interface{})) + apiObject.AmazonMskCluster = expandAmazonMSKCluster(v[0].(map[string]interface{})) } return apiObject } -func expandClusterClientVPCConfig(tfMap map[string]interface{}) *types.KafkaClusterClientVpcConfig { +func expandKafkaClusterClientVPCConfig(tfMap map[string]interface{}) *types.KafkaClusterClientVpcConfig { 
// nosemgrep:ci.kafka-in-func-name apiObject := &types.KafkaClusterClientVpcConfig{} if v, ok := tfMap["security_groups_ids"].(*schema.Set); ok && v.Len() > 0 { @@ -821,7 +822,7 @@ func expandClusterClientVPCConfig(tfMap map[string]interface{}) *types.KafkaClus return apiObject } -func expandAmazonCluster(tfMap map[string]interface{}) *types.AmazonMskCluster { +func expandAmazonMSKCluster(tfMap map[string]interface{}) *types.AmazonMskCluster { // nosemgrep:ci.msk-in-func-name apiObject := &types.AmazonMskCluster{} if v, ok := tfMap["msk_cluster_arn"].(string); ok && v != "" { diff --git a/internal/service/kafka/replicator_test.go b/internal/service/kafka/replicator_test.go index 00118727617..aac652b2e35 100644 --- a/internal/service/kafka/replicator_test.go +++ b/internal/service/kafka/replicator_test.go @@ -5,21 +5,17 @@ package kafka_test import ( "context" - "errors" "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/kafka" - "github.com/aws/aws-sdk-go-v2/service/kafka/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/internal/errs" tfkafka "github.com/hashicorp/terraform-provider-aws/internal/service/kafka" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -246,44 +242,39 @@ func testAccCheckReplicatorDestroy(ctx context.Context) resource.TestCheckFunc { continue } - _, err := conn.DescribeReplicator(ctx, &kafka.DescribeReplicatorInput{ - ReplicatorArn: aws.String(rs.Primary.ID), - }) - if errs.IsA[*types.NotFoundException](err) { + _, err := tfkafka.FindReplicatorByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { continue } + if err != nil { return err } - return create.Error(names.Kafka, create.ErrActionCheckingDestroyed, tfkafka.ResNameReplicator, rs.Primary.ID, errors.New("not destroyed")) + return fmt.Errorf("MSK Replicator %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckReplicatorExists(ctx context.Context, name string, replicator *kafka.DescribeReplicatorOutput) resource.TestCheckFunc { +func testAccCheckReplicatorExists(ctx context.Context, n string, v *kafka.DescribeReplicatorOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.Kafka, create.ErrActionCheckingExistence, tfkafka.ResNameReplicator, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return create.Error(names.Kafka, create.ErrActionCheckingExistence, tfkafka.ResNameReplicator, name, errors.New("not set")) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) - resp, err := conn.DescribeReplicator(ctx, &kafka.DescribeReplicatorInput{ - ReplicatorArn: aws.String(rs.Primary.ID), - }) + + output, err := tfkafka.FindReplicatorByARN(ctx, conn, rs.Primary.ID) if err != nil { - return create.Error(names.Kafka, create.ErrActionCheckingExistence, tfkafka.ResNameReplicator, rs.Primary.ID, err) + return err } - *replicator = *resp + *v = *output return nil } diff --git 
a/internal/service/kafka/scram_secret_association.go b/internal/service/kafka/scram_secret_association.go index 537659bd16e..183e5017647 100644 --- a/internal/service/kafka/scram_secret_association.go +++ b/internal/service/kafka/scram_secret_association.go @@ -5,35 +5,41 @@ package kafka import ( "context" + "errors" "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/go-multierror" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) const ( - ScramSecretBatchSize = 10 + scramSecretBatchSize = 10 ) -// @SDKResource("aws_msk_scram_secret_association") -func ResourceScramSecretAssociation() *schema.Resource { +// @SDKResource("aws_msk_scram_secret_association", name="SCRAM Secret Association") +func resourceSCRAMSecretAssociation() *schema.Resource { return &schema.Resource{ - CreateWithoutTimeout: resourceScramSecretAssociationCreate, - ReadWithoutTimeout: resourceScramSecretAssociationRead, - UpdateWithoutTimeout: resourceScramSecretAssociationUpdate, - DeleteWithoutTimeout: resourceScramSecretAssociationDelete, + CreateWithoutTimeout: resourceSCRAMSecretAssociationCreate, + ReadWithoutTimeout: resourceSCRAMSecretAssociationRead, + UpdateWithoutTimeout: resourceSCRAMSecretAssociationUpdate, + DeleteWithoutTimeout: resourceSCRAMSecretAssociationDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, + Schema: map[string]*schema.Schema{ "cluster_arn": { Type: schema.TypeString, @@ -53,168 +59,169 @@ func ResourceScramSecretAssociation() *schema.Resource { } } -func resourceScramSecretAssociationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceSCRAMSecretAssociationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - clusterArn := d.Get("cluster_arn").(string) - secretArnList := flex.ExpandStringSet(d.Get("secret_arn_list").(*schema.Set)) + clusterARN := d.Get("cluster_arn").(string) - output, err := associateClusterSecrets(ctx, conn, clusterArn, secretArnList) - if err != nil { - return sdkdiag.AppendErrorf(diags, "associating scram secret(s) to MSK cluster (%s): %s", clusterArn, err) + if err := associateSRAMSecrets(ctx, conn, clusterARN, flex.ExpandStringValueSet(d.Get("secret_arn_list").(*schema.Set))); err != nil { + return sdkdiag.AppendErrorf(diags, "creating MSK SCRAM Secret Association (%s): %s", clusterARN, err) } - d.SetId(aws.StringValue(output.ClusterArn)) + d.SetId(clusterARN) - if len(output.UnprocessedScramSecrets) != 0 { - return sdkdiag.AppendErrorf(diags, "associating scram secret(s) to
MSK cluster (%s): %s", clusterArn, unprocessedScramSecretsError(output.UnprocessedScramSecrets)) - } - - return append(diags, resourceScramSecretAssociationRead(ctx, d, meta)...) + return append(diags, resourceSCRAMSecretAssociationRead(ctx, d, meta)...) } -func resourceScramSecretAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceSCRAMSecretAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - secretArnList, err := FindScramSecrets(ctx, conn, d.Id()) + scramSecrets, err := findSCRAMSecretsByClusterARN(ctx, conn, d.Id()) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { - log.Printf("[WARN] Scram secret(s) for MSK cluster (%s) not found, removing from state", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] MSK SCRAM Secret Association (%s) not found, removing from state", d.Id()) d.SetId("") return diags } + if err != nil { - return sdkdiag.AppendErrorf(diags, "reading MSK cluster (%s) scram secret(s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading MSK SCRAM Secret Association (%s): %s", d.Id(), err) } d.Set("cluster_arn", d.Id()) - if err := d.Set("secret_arn_list", flex.FlattenStringSet(secretArnList)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting secret_arn_list: %s", err) - } + d.Set("secret_arn_list", scramSecrets) return diags } -func resourceScramSecretAssociationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceSCRAMSecretAssociationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) o, n := d.GetChange("secret_arn_list") - oldSet, newSet := o.(*schema.Set), n.(*schema.Set) - - if newSet.Len() > 0 { - if newSecrets := newSet.Difference(oldSet); newSecrets.Len() > 0 { - output, err := associateClusterSecrets(ctx, conn, d.Id(), flex.ExpandStringSet(newSecrets)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "associating scram secret(s) with MSK cluster (%s): %s", d.Id(), err) - } + os, ns := o.(*schema.Set), n.(*schema.Set) - if len(output.UnprocessedScramSecrets) != 0 { - return sdkdiag.AppendErrorf(diags, "associating scram secret(s) to MSK cluster (%s): %s", d.Id(), unprocessedScramSecretsError(output.UnprocessedScramSecrets)) - } + if add := flex.ExpandStringValueSet(ns.Difference(os)); len(add) > 0 { + if err := associateSRAMSecrets(ctx, conn, d.Id(), add); err != nil { + return sdkdiag.AppendErrorf(diags, "updating MSK SCRAM Secret Association (%s): %s", d.Id(), err) } } - if oldSet.Len() > 0 { - if deleteSecrets := oldSet.Difference(newSet); deleteSecrets.Len() > 0 { - output, err := disassociateClusterSecrets(ctx, conn, d.Id(), flex.ExpandStringSet(deleteSecrets)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "disassociating scram secret(s) from MSK cluster (%s): %s", d.Id(), err) - } - - if len(output.UnprocessedScramSecrets) != 0 { - return sdkdiag.AppendErrorf(diags, "disassociating scram secret(s) from MSK cluster (%s): %s", d.Id(), unprocessedScramSecretsError(output.UnprocessedScramSecrets)) - } + if del := flex.ExpandStringValueSet(os.Difference(ns)); len(del) > 0 { + if err := disassociateSRAMSecrets(ctx, 
conn, d.Id(), del); err != nil { + return sdkdiag.AppendErrorf(diags, "updating MSK SCRAM Secret Association (%s): %s", d.Id(), err) } } - return append(diags, resourceScramSecretAssociationRead(ctx, d, meta)...) + return append(diags, resourceSCRAMSecretAssociationRead(ctx, d, meta)...) } -func resourceScramSecretAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceSCRAMSecretAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - secretArnList, err := FindScramSecrets(ctx, conn, d.Id()) + err := disassociateSRAMSecrets(ctx, conn, d.Id(), flex.ExpandStringValueSet(d.Get("secret_arn_list").(*schema.Set))) - if err != nil { - if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { - return diags - } - return sdkdiag.AppendErrorf(diags, "reading scram secret(s) for MSK cluster (%s): %s", d.Id(), err) + if errs.IsA[*types.NotFoundException](err) { + return diags } - if len(secretArnList) > 0 { - output, err := disassociateClusterSecrets(ctx, conn, d.Id(), secretArnList) - if err != nil { - if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { - return diags - } - return sdkdiag.AppendErrorf(diags, "disassociating scram secret(s) from MSK cluster (%s): %s", d.Id(), err) - } - if len(output.UnprocessedScramSecrets) != 0 { - return sdkdiag.AppendErrorf(diags, "disassociating scram secret(s) from MSK cluster (%s): %s", d.Id(), unprocessedScramSecretsError(output.UnprocessedScramSecrets)) - } + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting MSK SCRAM Secret Association (%s): %s", d.Id(), err) } return diags } -func associateClusterSecrets(ctx context.Context, conn *kafka.Kafka, clusterArn string, secretArnList []*string) (*kafka.BatchAssociateScramSecretOutput, error) { - output := &kafka.BatchAssociateScramSecretOutput{} +func findSCRAMSecretsByClusterARN(ctx context.Context, conn *kafka.Client, clusterARN string) ([]string, error) { + input := &kafka.ListScramSecretsInput{ + ClusterArn: aws.String(clusterARN), + } + var output []string + + pages := kafka.NewListScramSecretsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for i := 0; i < len(secretArnList); i += ScramSecretBatchSize { - end := i + ScramSecretBatchSize - if end > len(secretArnList) { - end = len(secretArnList) + if errs.IsA[*types.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } } - resp, err := conn.BatchAssociateScramSecretWithContext(ctx, &kafka.BatchAssociateScramSecretInput{ - ClusterArn: aws.String(clusterArn), - SecretArnList: secretArnList[i:end], - }) if err != nil { return nil, err } - output.ClusterArn = resp.ClusterArn - output.UnprocessedScramSecrets = append(output.UnprocessedScramSecrets, resp.UnprocessedScramSecrets...) + output = append(output, page.SecretArnList...) 
} + return output, nil } -func disassociateClusterSecrets(ctx context.Context, conn *kafka.Kafka, clusterArn string, secretArnList []*string) (*kafka.BatchDisassociateScramSecretOutput, error) { - output := &kafka.BatchDisassociateScramSecretOutput{} +func associateSRAMSecrets(ctx context.Context, conn *kafka.Client, clusterARN string, secretARNs []string) error { + for _, chunk := range tfslices.Chunks(secretARNs, scramSecretBatchSize) { + input := &kafka.BatchAssociateScramSecretInput{ + ClusterArn: aws.String(clusterARN), + SecretArnList: chunk, + } + + output, err := conn.BatchAssociateScramSecret(ctx, input) - for i := 0; i < len(secretArnList); i += ScramSecretBatchSize { - end := i + ScramSecretBatchSize - if end > len(secretArnList) { - end = len(secretArnList) + if err == nil { + err = unprocessedScramSecretsError(output.UnprocessedScramSecrets, false) } - resp, err := conn.BatchDisassociateScramSecretWithContext(ctx, &kafka.BatchDisassociateScramSecretInput{ - ClusterArn: aws.String(clusterArn), - SecretArnList: secretArnList[i:end], - }) if err != nil { - return nil, err + return err + } + } + + return nil +} + +func disassociateSRAMSecrets(ctx context.Context, conn *kafka.Client, clusterARN string, secretARNs []string) error { + for _, chunk := range tfslices.Chunks(secretARNs, scramSecretBatchSize) { + input := &kafka.BatchDisassociateScramSecretInput{ + ClusterArn: aws.String(clusterARN), + SecretArnList: chunk, + } + + output, err := conn.BatchDisassociateScramSecret(ctx, input) + + if err == nil { + err = unprocessedScramSecretsError(output.UnprocessedScramSecrets, true) } - output.ClusterArn = resp.ClusterArn - output.UnprocessedScramSecrets = append(output.UnprocessedScramSecrets, resp.UnprocessedScramSecrets...) + if err != nil { + return err + } } - return output, nil + + return nil } -func unprocessedScramSecretsError(secrets []*kafka.UnprocessedScramSecret) error { - var errors *multierror.Error +func unprocessedScramSecretsError(apiObjects []types.UnprocessedScramSecret, ignoreInvalidSecretARN bool) error { + var errs []error + + for _, apiObject := range apiObjects { + if ignoreInvalidSecretARN && aws.ToString(apiObject.ErrorCode) == "InvalidSecretArn" { + continue + } - for _, s := range secrets { - secretArn, errMsg := aws.StringValue(s.SecretArn), aws.StringValue(s.ErrorMessage) - errors = multierror.Append(errors, fmt.Errorf("scram secret (%s): %s", secretArn, errMsg)) + err := unprocessedScramSecretError(&apiObject) + + if err != nil { + errs = append(errs, fmt.Errorf("%s: %w", aws.ToString(apiObject.SecretArn), err)) + } } - return errors.ErrorOrNil() + return errors.Join(errs...) 
+} + +func unprocessedScramSecretError(apiObject *types.UnprocessedScramSecret) error { + return fmt.Errorf("%s: %s", aws.ToString(apiObject.ErrorCode), aws.ToString(apiObject.ErrorMessage)) } diff --git a/internal/service/kafka/scram_secret_association_test.go b/internal/service/kafka/scram_secret_association_test.go index 620b219bc3c..06f2f5d8256 100644 --- a/internal/service/kafka/scram_secret_association_test.go +++ b/internal/service/kafka/scram_secret_association_test.go @@ -8,15 +8,14 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfkafka "github.com/hashicorp/terraform-provider-aws/internal/service/kafka" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccKafkaScramSecretAssociation_basic(t *testing.T) { @@ -28,7 +27,7 @@ func TestAccKafkaScramSecretAssociation_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckScramSecretAssociationDestroy(ctx), Steps: []resource.TestStep{ @@ -60,7 +59,7 @@ func TestAccKafkaScramSecretAssociation_update(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckScramSecretAssociationDestroy(ctx), Steps: []resource.TestStep{ @@ -105,7 +104,7 @@ func TestAccKafkaScramSecretAssociation_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckScramSecretAssociationDestroy(ctx), Steps: []resource.TestStep{ @@ -113,7 +112,7 @@ func TestAccKafkaScramSecretAssociation_disappears(t *testing.T) { Config: testAccScramSecretAssociationConfig_basic(rName, 1), Check: resource.ComposeTestCheckFunc( testAccCheckScramSecretAssociationExists(ctx, resourceName), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tfkafka.ResourceScramSecretAssociation(), resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfkafka.ResourceSCRAMSecretAssociation(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -129,7 +128,7 @@ func TestAccKafkaScramSecretAssociation_Disappears_cluster(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckScramSecretAssociationDestroy(ctx), Steps: []resource.TestStep{ @@ -152,36 +151,35 @@ func testAccCheckScramSecretAssociationDestroy(ctx context.Context) resource.Tes continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) - input := &kafka.ListScramSecretsInput{ - ClusterArn: aws.String(rs.Primary.ID), + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) + + _, err := tfkafka.FindSCRAMSecretsByClusterARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue } - _, err := conn.ListScramSecretsWithContext(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, kafka.ErrCodeNotFoundException) { - continue - } return err } + + return fmt.Errorf("MSK Cluster %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckScramSecretAssociationExists(ctx context.Context, resourceName string) resource.TestCheckFunc { +func testAccCheckScramSecretAssociationExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set for %s", resourceName) - } + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) - _, err := tfkafka.FindScramSecrets(ctx, conn, rs.Primary.ID) + _, err := tfkafka.FindSCRAMSecretsByClusterARN(ctx, conn, rs.Primary.ID) return err } diff --git a/internal/service/kafka/serverless_cluster.go b/internal/service/kafka/serverless_cluster.go index c044cd0fe56..2f599ca6bf6 100644 --- a/internal/service/kafka/serverless_cluster.go +++ b/internal/service/kafka/serverless_cluster.go @@ -8,8 +8,9 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -24,7 +25,7 @@ import ( // @SDKResource("aws_msk_serverless_cluster", name="Serverless Cluster") // @Tags(identifierAttribute="id") -func ResourceServerlessCluster() *schema.Resource { +func resourceServerlessCluster() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceServerlessClusterCreate, ReadWithoutTimeout: resourceServerlessClusterRead, @@ -127,26 +128,25 @@ func ResourceServerlessCluster() *schema.Resource { func resourceServerlessClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConn(ctx) + conn := meta.(*conns.AWSClient).KafkaClient(ctx) name := d.Get("cluster_name").(string) input := &kafka.CreateClusterV2Input{ ClusterName: aws.String(name), - Serverless: &kafka.ServerlessRequest{ + Serverless: &types.ServerlessRequest{ ClientAuthentication: expandServerlessClientAuthentication(d.Get("client_authentication").([]interface{})[0].(map[string]interface{})), VpcConfigs: expandVpcConfigs(d.Get("vpc_config").([]interface{})), }, Tags: getTagsIn(ctx), } - output, err := conn.CreateClusterV2WithContext(ctx, input) + output, err := conn.CreateClusterV2(ctx, input) if err != nil { return 
sdkdiag.AppendErrorf(diags, "creating MSK Serverless Cluster (%s): %s", name, err) } - d.SetId(aws.StringValue(output.ClusterArn)) + d.SetId(aws.ToString(output.ClusterArn)) if _, err := waitClusterCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Serverless Cluster (%s) create: %s", d.Id(), err) @@ -157,10 +157,9 @@ func resourceServerlessClusterCreate(ctx context.Context, d *schema.ResourceData func resourceServerlessClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConn(ctx) - - cluster, err := FindServerlessClusterByARN(ctx, conn, d.Id()) + cluster, err := findServerlessClusterByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] MSK Serverless Cluster (%s) not found, removing from state", d.Id()) @@ -172,7 +171,7 @@ func resourceServerlessClusterRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading MSK Serverless Cluster (%s): %s", d.Id(), err) } - clusterARN := aws.StringValue(cluster.ClusterArn) + clusterARN := aws.ToString(cluster.ClusterArn) d.Set("arn", clusterARN) if cluster.Serverless.ClientAuthentication != nil { if err := d.Set("client_authentication", []interface{}{flattenServerlessClientAuthentication(cluster.Serverless.ClientAuthentication)}); err != nil { @@ -194,16 +193,33 @@ func resourceServerlessClusterRead(ctx context.Context, d *schema.ResourceData, } func resourceServerlessClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + // Tags only. - return resourceServerlessClusterRead(ctx, d, meta) + + return append(diags, resourceServerlessClusterRead(ctx, d, meta)...) 
+} + +func findServerlessClusterByARN(ctx context.Context, conn *kafka.Client, arn string) (*types.Cluster, error) { + output, err := findClusterV2ByARN(ctx, conn, arn) + + if err != nil { + return nil, err + } + + if output.Serverless == nil { + return nil, tfresource.NewEmptyResultError(arn) + } + + return output, nil } -func expandServerlessClientAuthentication(tfMap map[string]interface{}) *kafka.ServerlessClientAuthentication { +func expandServerlessClientAuthentication(tfMap map[string]interface{}) *types.ServerlessClientAuthentication { if tfMap == nil { return nil } - apiObject := &kafka.ServerlessClientAuthentication{} + apiObject := &types.ServerlessClientAuthentication{} if v, ok := tfMap["sasl"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Sasl = expandServerlessSasl(v[0].(map[string]interface{})) @@ -212,12 +228,12 @@ func expandServerlessClientAuthentication(tfMap map[string]interface{}) *kafka.S return apiObject } -func expandServerlessSasl(tfMap map[string]interface{}) *kafka.ServerlessSasl { // nosemgrep:ci.caps2-in-func-name +func expandServerlessSasl(tfMap map[string]interface{}) *types.ServerlessSasl { // nosemgrep:ci.caps2-in-func-name if tfMap == nil { return nil } - apiObject := &kafka.ServerlessSasl{} + apiObject := &types.ServerlessSasl{} if v, ok := tfMap["iam"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Iam = expandIam(v[0].(map[string]interface{})) @@ -226,12 +242,12 @@ func expandServerlessSasl(tfMap map[string]interface{}) *kafka.ServerlessSasl { return apiObject } -func expandIam(tfMap map[string]interface{}) *kafka.Iam { // nosemgrep:ci.caps4-in-func-name +func expandIam(tfMap map[string]interface{}) *types.Iam { // nosemgrep:ci.caps4-in-func-name if tfMap == nil { return nil } - apiObject := &kafka.Iam{} + apiObject := &types.Iam{} if v, ok := tfMap["enabled"].(bool); ok { apiObject.Enabled = aws.Bool(v) @@ -240,7 +256,7 @@ func expandIam(tfMap map[string]interface{}) *kafka.Iam { // nosemgrep:ci.caps4- return apiObject } -func flattenServerlessClientAuthentication(apiObject *kafka.ServerlessClientAuthentication) map[string]interface{} { +func flattenServerlessClientAuthentication(apiObject *types.ServerlessClientAuthentication) map[string]interface{} { if apiObject == nil { return nil } @@ -254,7 +270,7 @@ func flattenServerlessClientAuthentication(apiObject *kafka.ServerlessClientAuth return tfMap } -func flattenServerlessSasl(apiObject *kafka.ServerlessSasl) map[string]interface{} { // nosemgrep:ci.caps2-in-func-name +func flattenServerlessSasl(apiObject *types.ServerlessSasl) map[string]interface{} { // nosemgrep:ci.caps2-in-func-name if apiObject == nil { return nil } @@ -268,7 +284,7 @@ func flattenServerlessSasl(apiObject *kafka.ServerlessSasl) map[string]interface return tfMap } -func flattenIam(apiObject *kafka.Iam) map[string]interface{} { // nosemgrep:ci.caps4-in-func-name +func flattenIam(apiObject *types.Iam) map[string]interface{} { // nosemgrep:ci.caps4-in-func-name if apiObject == nil { return nil } @@ -276,36 +292,36 @@ func flattenIam(apiObject *kafka.Iam) map[string]interface{} { // nosemgrep:ci.c tfMap := map[string]interface{}{} if v := apiObject.Enabled; v != nil { - tfMap["enabled"] = aws.BoolValue(v) + tfMap["enabled"] = aws.ToBool(v) } return tfMap } -func expandVpcConfig(tfMap map[string]interface{}) *kafka.VpcConfig { // nosemgrep:ci.caps5-in-func-name +func expandVpcConfig(tfMap map[string]interface{}) *types.VpcConfig { // nosemgrep:ci.caps5-in-func-name if tfMap == nil { return nil } - 
apiObject := &kafka.VpcConfig{} + apiObject := &types.VpcConfig{} if v, ok := tfMap["security_group_ids"].(*schema.Set); ok && v.Len() > 0 { - apiObject.SecurityGroupIds = flex.ExpandStringSet(v) + apiObject.SecurityGroupIds = flex.ExpandStringValueSet(v) } if v, ok := tfMap["subnet_ids"].(*schema.Set); ok && v.Len() > 0 { - apiObject.SubnetIds = flex.ExpandStringSet(v) + apiObject.SubnetIds = flex.ExpandStringValueSet(v) } return apiObject } -func expandVpcConfigs(tfList []interface{}) []*kafka.VpcConfig { // nosemgrep:ci.caps5-in-func-name +func expandVpcConfigs(tfList []interface{}) []types.VpcConfig { // nosemgrep:ci.caps5-in-func-name if len(tfList) == 0 { return nil } - var apiObjects []*kafka.VpcConfig + var apiObjects []types.VpcConfig for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -320,31 +336,27 @@ func expandVpcConfigs(tfList []interface{}) []*kafka.VpcConfig { // nosemgrep:ci continue } - apiObjects = append(apiObjects, apiObject) + apiObjects = append(apiObjects, *apiObject) } return apiObjects } -func flattenVpcConfig(apiObject *kafka.VpcConfig) map[string]interface{} { // nosemgrep:ci.caps5-in-func-name - if apiObject == nil { - return nil - } - +func flattenVpcConfig(apiObject types.VpcConfig) map[string]interface{} { // nosemgrep:ci.caps5-in-func-name tfMap := map[string]interface{}{} if v := apiObject.SecurityGroupIds; v != nil { - tfMap["security_group_ids"] = aws.StringValueSlice(v) + tfMap["security_group_ids"] = v } if v := apiObject.SubnetIds; v != nil { - tfMap["subnet_ids"] = aws.StringValueSlice(v) + tfMap["subnet_ids"] = v } return tfMap } -func flattenVpcConfigs(apiObjects []*kafka.VpcConfig) []interface{} { // nosemgrep:ci.caps5-in-func-name +func flattenVpcConfigs(apiObjects []types.VpcConfig) []interface{} { // nosemgrep:ci.caps5-in-func-name if len(apiObjects) == 0 { return nil } @@ -352,10 +364,6 @@ func flattenVpcConfigs(apiObjects []*kafka.VpcConfig) []interface{} { // nosemgr var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenVpcConfig(apiObject)) } diff --git a/internal/service/kafka/serverless_cluster_test.go b/internal/service/kafka/serverless_cluster_test.go index bb4cea74402..8c4b41b906f 100644 --- a/internal/service/kafka/serverless_cluster_test.go +++ b/internal/service/kafka/serverless_cluster_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -17,17 +17,18 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfkafka "github.com/hashicorp/terraform-provider-aws/internal/service/kafka" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccKafkaServerlessCluster_basic(t *testing.T) { ctx := acctest.Context(t) - var v kafka.Cluster + var v types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_serverless_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServerlessClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -59,13 +60,13 @@ func TestAccKafkaServerlessCluster_basic(t *testing.T) { func TestAccKafkaServerlessCluster_disappears(t *testing.T) { ctx := acctest.Context(t) - var v kafka.Cluster + var v types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_serverless_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServerlessClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -83,13 +84,13 @@ func TestAccKafkaServerlessCluster_disappears(t *testing.T) { func TestAccKafkaServerlessCluster_tags(t *testing.T) { ctx := acctest.Context(t) - var v kafka.Cluster + var v types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_serverless_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServerlessClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -129,13 +130,13 @@ func TestAccKafkaServerlessCluster_tags(t *testing.T) { func TestAccKafkaServerlessCluster_securityGroup(t *testing.T) { ctx := acctest.Context(t) - var v kafka.Cluster + var v types.Cluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_msk_serverless_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, kafka.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.KafkaEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckServerlessClusterDestroy(ctx), Steps: []resource.TestStep{ @@ -159,7 +160,7 @@ func TestAccKafkaServerlessCluster_securityGroup(t *testing.T) { func testAccCheckServerlessClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_msk_serverless_cluster" { @@ -183,7 +184,7 @@ func testAccCheckServerlessClusterDestroy(ctx context.Context) resource.TestChec } } -func testAccCheckServerlessClusterExists(ctx context.Context, n string, v *kafka.Cluster) resource.TestCheckFunc { +func testAccCheckServerlessClusterExists(ctx context.Context, n string, v *types.Cluster) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -194,7 +195,7 @@ func testAccCheckServerlessClusterExists(ctx context.Context, n string, v *kafka return fmt.Errorf("No MSK Serverless Cluster ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaClient(ctx) output, err := tfkafka.FindServerlessClusterByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/kafka/service_package.go b/internal/service/kafka/service_package.go 
index 651653a72a5..848690b75b3 100644 --- a/internal/service/kafka/service_package.go +++ b/internal/service/kafka/service_package.go @@ -6,19 +6,28 @@ package kafka import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - request_sdkv1 "github.com/aws/aws-sdk-go/aws/request" - kafka_sdkv1 "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -// CustomizeConn customizes a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) CustomizeConn(ctx context.Context, conn *kafka_sdkv1.Kafka) (*kafka_sdkv1.Kafka, error) { - conn.Handlers.Retry.PushBack(func(r *request_sdkv1.Request) { - if tfawserr.ErrMessageContains(r.Error, kafka_sdkv1.ErrCodeTooManyRequestsException, "Too Many Requests") { - r.Retryable = aws_sdkv1.Bool(true) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*kafka.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws.Config)) + + return kafka.NewFromConfig(cfg, func(o *kafka.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws.String(endpoint) } - }) - return conn, nil + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*types.TooManyRequestsException](err, "Too Many Requests") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. 
+ })) + }), nil } diff --git a/internal/service/kafka/service_package_gen.go b/internal/service/kafka/service_package_gen.go index ab6c64e0ac5..bb3859192be 100644 --- a/internal/service/kafka/service_package_gen.go +++ b/internal/service/kafka/service_package_gen.go @@ -5,11 +5,6 @@ package kafka import ( "context" - aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" - kafka_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafka" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - kafka_sdkv1 "github.com/aws/aws-sdk-go/service/kafka" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -28,23 +23,32 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceBrokerNodes, + Factory: dataSourceBootstrapBrokers, + TypeName: "aws_msk_bootstrap_brokers", + Name: "Bootstrap Brokers", + }, + { + Factory: dataSourceBrokerNodes, TypeName: "aws_msk_broker_nodes", + Name: "Broker Nodes", }, { - Factory: DataSourceCluster, + Factory: dataSourceCluster, TypeName: "aws_msk_cluster", + Name: "Cluster", }, { - Factory: DataSourceConfiguration, + Factory: dataSourceConfiguration, TypeName: "aws_msk_configuration", + Name: "Configuration", }, { - Factory: DataSourceVersion, + Factory: dataSourceKafkaVersion, TypeName: "aws_msk_kafka_version", + Name: "Kafka Version", }, { - Factory: DataSourceVPCConnection, + Factory: dataSourceVPCConnection, TypeName: "aws_msk_vpc_connection", Name: "VPC Connection", Tags: &types.ServicePackageResourceTags{}, @@ -55,7 +59,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceCluster, + Factory: resourceCluster, TypeName: "aws_msk_cluster", Name: "Cluster", Tags: &types.ServicePackageResourceTags{ @@ -63,16 +67,17 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceClusterPolicy, + Factory: resourceClusterPolicy, TypeName: "aws_msk_cluster_policy", Name: "Cluster Policy", }, { - Factory: ResourceConfiguration, + Factory: resourceConfiguration, TypeName: "aws_msk_configuration", + Name: "Configuration", }, { - Factory: ResourceReplicator, + Factory: resourceReplicator, TypeName: "aws_msk_replicator", Name: "Replicator", Tags: &types.ServicePackageResourceTags{ @@ -80,11 +85,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceScramSecretAssociation, + Factory: resourceSCRAMSecretAssociation, TypeName: "aws_msk_scram_secret_association", + Name: "SCRAM Secret Association", }, { - Factory: ResourceServerlessCluster, + Factory: resourceServerlessCluster, TypeName: "aws_msk_serverless_cluster", Name: "Serverless Cluster", Tags: &types.ServicePackageResourceTags{ @@ -92,7 +98,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceVPCConnection, + Factory: resourceVPCConnection, TypeName: "aws_msk_vpc_connection", Name: "VPC Connection", Tags: &types.ServicePackageResourceTags{ @@ -106,24 +112,6 @@ func (p *servicePackage) ServicePackageName() string { return 
names.Kafka } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*kafka_sdkv1.Kafka, error) { - sess := config["session"].(*session_sdkv1.Session) - - return kafka_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil -} - -// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. -func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*kafka_sdkv2.Client, error) { - cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - - return kafka_sdkv2.NewFromConfig(cfg, func(o *kafka_sdkv2.Options) { - if endpoint := config["endpoint"].(string); endpoint != "" { - o.BaseEndpoint = aws_sdkv2.String(endpoint) - } - }), nil -} - func ServicePackage(ctx context.Context) conns.ServicePackage { return &servicePackage{} } diff --git a/internal/service/kafka/status.go b/internal/service/kafka/status.go deleted file mode 100644 index 63bc4548ba8..00000000000 --- a/internal/service/kafka/status.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package kafka - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func statusClusterState(ctx context.Context, conn *kafka.Kafka, arn string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := findClusterV2ByARN(ctx, conn, arn) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.State), nil - } -} - -func statusClusterOperationState(ctx context.Context, conn *kafka.Kafka, arn string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindClusterOperationByARN(ctx, conn, arn) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.OperationState), nil - } -} - -func statusConfigurationState(ctx context.Context, conn *kafka.Kafka, arn string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindConfigurationByARN(ctx, conn, arn) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.State), nil - } -} diff --git a/internal/service/kafka/sweep.go b/internal/service/kafka/sweep.go index bf73e6ffb85..6d76ab291fd 100644 --- a/internal/service/kafka/sweep.go +++ b/internal/service/kafka/sweep.go @@ -7,11 +7,12 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -39,39 +40,36 @@ func sweepClusters(region string) error { return fmt.Errorf("error getting client: %s", err) } input := &kafka.ListClustersV2Input{} - conn := client.KafkaConn(ctx) + conn := 
client.KafkaClient(ctx) sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListClustersV2PagesWithContext(ctx, input, func(page *kafka.ListClustersV2Output, lastPage bool) bool { - if page == nil { - return !lastPage + pages := kafka.NewListClustersV2Paginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping MSK Cluster sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing MSK Clusters (%s): %w", region, err) } for _, v := range page.ClusterInfoList { - arn := aws.StringValue(v.ClusterArn) + arn := aws.ToString(v.ClusterArn) - if state := aws.StringValue(v.State); state == kafka.ClusterStateDeleting { + if state := v.State; state == types.ClusterStateDeleting { log.Printf("[INFO] Skipping MSK Cluster %s: State=%s", arn, state) continue } - r := ResourceCluster() + r := resourceCluster() d := r.Data(nil) d.SetId(arn) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping MSK Cluster sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing MSK Clusters (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -89,39 +87,37 @@ func sweepConfigurations(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.KafkaConn(ctx) + conn := client.KafkaClient(ctx) input := &kafka.ListConfigurationsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListConfigurationsPagesWithContext(ctx, input, func(page *kafka.ListConfigurationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := kafka.NewListConfigurationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping MSK Configuration sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing MSK Configurations (%s): %w", region, err) } for _, v := range page.Configurations { - arn := aws.StringValue(v.Arn) + arn := aws.ToString(v.Arn) - if state := aws.StringValue(v.State); state == kafka.ConfigurationStateDeleting { + if state := v.State; state == types.ConfigurationStateDeleting { log.Printf("[INFO] Skipping MSK Configuration %s: State=%s", arn, state) continue } - r := ResourceConfiguration() + r := resourceConfiguration() d := r.Data(nil) d.SetId(arn) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping MSK Configuration sweep for %s: %s", region, err) - return nil - } - if err != nil { - return fmt.Errorf("error listing MSK Configurations (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) diff --git a/internal/service/kafka/tags_gen.go b/internal/service/kafka/tags_gen.go index 66d97f1c1ba..9194ba4caf3 100644 --- a/internal/service/kafka/tags_gen.go +++ b/internal/service/kafka/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/aws/aws-sdk-go/service/kafka/kafkaiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafka" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" 
"github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -16,21 +15,21 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// map[string]*string handling +// map[string]string handling // Tags returns kafka service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from kafka service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns kafka service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -41,7 +40,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets kafka service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -50,7 +49,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates kafka service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn kafkaiface.KafkaAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *kafka.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*kafka.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -61,10 +60,10 @@ func updateTags(ctx context.Context, conn kafkaiface.KafkaAPI, identifier string if len(removedTags) > 0 { input := &kafka.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -79,7 +78,7 @@ func updateTags(ctx context.Context, conn kafkaiface.KafkaAPI, identifier string Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -92,5 +91,5 @@ func updateTags(ctx context.Context, conn kafkaiface.KafkaAPI, identifier string // UpdateTags updates kafka service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).KafkaConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).KafkaClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/kafka/tagsv2_gen.go b/internal/service/kafka/tagsv2_gen.go deleted file mode 100644 index 3e59f4933cc..00000000000 --- a/internal/service/kafka/tagsv2_gen.go +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated by internal/generate/tags/main.go; DO NOT EDIT. -package kafka - -import ( - "context" - - tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types/option" -) - -// map[string]string handling - -// TagsV2 returns kafka service tags. -func TagsV2(tags tftags.KeyValueTags) map[string]string { - return tags.Map() -} - -// keyValueTagsV2 creates tftags.KeyValueTags from kafka service tags. -func keyValueTagsV2(ctx context.Context, tags map[string]string) tftags.KeyValueTags { - return tftags.New(ctx, tags) -} - -// getTagsInV2 returns kafka service tags from Context. -// nil is returned if there are no input tags. -func getTagsInV2(ctx context.Context) map[string]string { - if inContext, ok := tftags.FromContext(ctx); ok { - if tags := TagsV2(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { - return tags - } - } - - return nil -} - -// setTagsOutV2 sets kafka service tags in Context. -func setTagsOutV2(ctx context.Context, tags map[string]string) { - if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = option.Some(keyValueTagsV2(ctx, tags)) - } -} diff --git a/internal/service/kafka/vpc_connection.go b/internal/service/kafka/vpc_connection.go index f581d63e6e6..202579f3089 100644 --- a/internal/service/kafka/vpc_connection.go +++ b/internal/service/kafka/vpc_connection.go @@ -27,7 +27,7 @@ import ( // @SDKResource("aws_msk_vpc_connection", name="VPC Connection") // @Tags(identifierAttribute="id") -func ResourceVPCConnection() *schema.Resource { +func resourceVPCConnection() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceVPCConnectionCreate, ReadWithoutTimeout: resourceVPCConnectionRead, @@ -82,22 +82,22 @@ func resourceVPCConnectionCreate(ctx context.Context, d *schema.ResourceData, me var diags diag.Diagnostics conn := meta.(*conns.AWSClient).KafkaClient(ctx) - in := &kafka.CreateVpcConnectionInput{ + input := &kafka.CreateVpcConnectionInput{ Authentication: aws.String(d.Get("authentication").(string)), ClientSubnets: flex.ExpandStringValueSet(d.Get("client_subnets").(*schema.Set)), SecurityGroups: flex.ExpandStringValueSet(d.Get("security_groups").(*schema.Set)), - Tags: getTagsInV2(ctx), + Tags: getTagsIn(ctx), TargetClusterArn: aws.String(d.Get("target_cluster_arn").(string)), VpcId: aws.String(d.Get("vpc_id").(string)), } - out, err := conn.CreateVpcConnection(ctx, in) + output, err := conn.CreateVpcConnection(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK VPC Connection: %s", err) } - d.SetId(aws.ToString(out.VpcConnectionArn)) + d.SetId(aws.ToString(output.VpcConnectionArn)) if _, err := waitVPCConnectionCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK VPC Connection (%s) create: %s", d.Id(), err) @@ -110,7 +110,7 @@ func resourceVPCConnectionRead(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics 
conn := meta.(*conns.AWSClient).KafkaClient(ctx) - out, err := FindVPCConnectionByARN(ctx, conn, d.Id()) + output, err := findVPCConnectionByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] MSK VPC Connection (%s) not found, removing from state", d.Id()) @@ -122,21 +122,23 @@ func resourceVPCConnectionRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "reading MSK VPC Connection (%s): %s", d.Id(), err) } - d.Set("arn", out.VpcConnectionArn) - d.Set("authentication", out.Authentication) - d.Set("client_subnets", flex.FlattenStringValueSet(out.Subnets)) - d.Set("security_groups", flex.FlattenStringValueSet(out.SecurityGroups)) - d.Set("target_cluster_arn", out.TargetClusterArn) - d.Set("vpc_id", out.VpcId) + d.Set("arn", output.VpcConnectionArn) + d.Set("authentication", output.Authentication) + d.Set("client_subnets", flex.FlattenStringValueSet(output.Subnets)) + d.Set("security_groups", flex.FlattenStringValueSet(output.SecurityGroups)) + d.Set("target_cluster_arn", output.TargetClusterArn) + d.Set("vpc_id", output.VpcId) - setTagsOutV2(ctx, out.Tags) + setTagsOut(ctx, output.Tags) return diags } func resourceVPCConnectionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + // Tags only. + return append(diags, resourceVPCConnectionRead(ctx, d, meta)...) } @@ -175,8 +177,8 @@ func waitVPCConnectionCreated(ctx context.Context, conn *kafka.Client, id string } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*kafka.DescribeVpcConnectionOutput); ok { - return out, err + if output, ok := outputRaw.(*kafka.DescribeVpcConnectionOutput); ok { + return output, err } return nil, err @@ -191,8 +193,8 @@ func waitVPCConnectionDeleted(ctx context.Context, conn *kafka.Client, arn strin } outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*kafka.DescribeVpcConnectionOutput); ok { - return out, err + if output, ok := outputRaw.(*kafka.DescribeVpcConnectionOutput); ok { + return output, err } return nil, err @@ -200,7 +202,7 @@ func waitVPCConnectionDeleted(ctx context.Context, conn *kafka.Client, arn strin func statusVPCConnection(ctx context.Context, conn *kafka.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := FindVPCConnectionByARN(ctx, conn, arn) + output, err := findVPCConnectionByARN(ctx, conn, arn) if tfresource.NotFound(err) { return nil, "", nil @@ -210,21 +212,21 @@ func statusVPCConnection(ctx context.Context, conn *kafka.Client, arn string) re return nil, "", err } - return out, string(out.State), nil + return output, string(output.State), nil } } -func FindVPCConnectionByARN(ctx context.Context, conn *kafka.Client, arn string) (*kafka.DescribeVpcConnectionOutput, error) { - in := &kafka.DescribeVpcConnectionInput{ +func findVPCConnectionByARN(ctx context.Context, conn *kafka.Client, arn string) (*kafka.DescribeVpcConnectionOutput, error) { + input := &kafka.DescribeVpcConnectionInput{ Arn: aws.String(arn), } - out, err := conn.DescribeVpcConnection(ctx, in) + output, err := conn.DescribeVpcConnection(ctx, input) if errs.IsA[*types.NotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, - LastRequest: in, + LastRequest: input, } } @@ -232,9 +234,9 @@ func FindVPCConnectionByARN(ctx context.Context, conn *kafka.Client, arn string) return nil, err } - if out == nil { - return nil, tfresource.NewEmptyResultError(in) + 
if output == nil { + return nil, tfresource.NewEmptyResultError(input) } - return out, nil + return output, nil } diff --git a/internal/service/kafka/vpc_connection_data_source.go b/internal/service/kafka/vpc_connection_data_source.go index 5c803e21959..094a55bd131 100644 --- a/internal/service/kafka/vpc_connection_data_source.go +++ b/internal/service/kafka/vpc_connection_data_source.go @@ -18,7 +18,7 @@ import ( // @SDKDataSource("aws_msk_vpc_connection", name="VPC Connection") // @Tags -func DataSourceVPCConnection() *schema.Resource { +func dataSourceVPCConnection() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceVPCConnectionRead, @@ -59,21 +59,21 @@ func dataSourceVPCConnectionRead(ctx context.Context, d *schema.ResourceData, me conn := meta.(*conns.AWSClient).KafkaClient(ctx) arn := d.Get("arn").(string) - out, err := FindVPCConnectionByARN(ctx, conn, arn) + output, err := findVPCConnectionByARN(ctx, conn, arn) if err != nil { return sdkdiag.AppendErrorf(diags, "reading MSK VPC Connection (%s): %s", arn, err) } - d.SetId(aws.ToString(out.VpcConnectionArn)) - d.Set("arn", out.VpcConnectionArn) - d.Set("authentication", out.Authentication) - d.Set("client_subnets", flex.FlattenStringValueSet(out.Subnets)) - d.Set("security_groups", flex.FlattenStringValueSet(out.SecurityGroups)) - d.Set("target_cluster_arn", out.TargetClusterArn) - d.Set("vpc_id", out.VpcId) + d.SetId(aws.ToString(output.VpcConnectionArn)) + d.Set("arn", output.VpcConnectionArn) + d.Set("authentication", output.Authentication) + d.Set("client_subnets", flex.FlattenStringValueSet(output.Subnets)) + d.Set("security_groups", flex.FlattenStringValueSet(output.SecurityGroups)) + d.Set("target_cluster_arn", output.TargetClusterArn) + d.Set("vpc_id", output.VpcId) - setTagsOutV2(ctx, out.Tags) + setTagsOut(ctx, output.Tags) return diags } diff --git a/internal/service/kafka/wait.go b/internal/service/kafka/wait.go deleted file mode 100644 index 350db152a3e..00000000000 --- a/internal/service/kafka/wait.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package kafka - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafka" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -const ( - configurationDeletedTimeout = 5 * time.Minute -) - -func waitClusterCreated(ctx context.Context, conn *kafka.Kafka, arn string, timeout time.Duration) (*kafka.Cluster, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafka.ClusterStateCreating}, - Target: []string{kafka.ClusterStateActive}, - Refresh: statusClusterState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafka.Cluster); ok { - if state, stateInfo := aws.StringValue(output.State), output.StateInfo; state == kafka.ClusterStateFailed && stateInfo != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateInfo.Code), aws.StringValue(stateInfo.Message))) - } - - return output, err - } - - return nil, err -} - -func waitClusterDeleted(ctx context.Context, conn *kafka.Kafka, arn string, timeout time.Duration) (*kafka.Cluster, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafka.ClusterStateDeleting}, - Target: []string{}, - Refresh: statusClusterState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafka.Cluster); ok { - if state, stateInfo := aws.StringValue(output.State), output.StateInfo; state == kafka.ClusterStateFailed && stateInfo != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateInfo.Code), aws.StringValue(stateInfo.Message))) - } - - return output, err - } - - return nil, err -} - -func waitClusterOperationCompleted(ctx context.Context, conn *kafka.Kafka, arn string, timeout time.Duration) (*kafka.ClusterOperationInfo, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{ClusterOperationStatePending, ClusterOperationStateUpdateInProgress}, - Target: []string{ClusterOperationStateUpdateComplete}, - Refresh: statusClusterOperationState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafka.ClusterOperationInfo); ok { - if state, errorInfo := aws.StringValue(output.OperationState), output.ErrorInfo; state == ClusterOperationStateUpdateFailed && errorInfo != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(errorInfo.ErrorCode), aws.StringValue(errorInfo.ErrorString))) - } - - return output, err - } - - return nil, err -} - -func waitConfigurationDeleted(ctx context.Context, conn *kafka.Kafka, arn string) (*kafka.DescribeConfigurationOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafka.ConfigurationStateDeleting}, - Target: []string{}, - Refresh: statusConfigurationState(ctx, conn, arn), - Timeout: configurationDeletedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafka.DescribeConfigurationOutput); ok { - return output, err - } - - return nil, err -} diff --git a/internal/tfresource/not_found_error.go b/internal/tfresource/not_found_error.go index 92013689c82..181142a1119 100644 --- a/internal/tfresource/not_found_error.go +++ b/internal/tfresource/not_found_error.go @@ -114,3 +114,10 @@ func AssertSingleValueResult[T any](a []T) (*T, 
error) { } return &a[0], nil } + +func AssertFirstValueResult[T any](a []T) (*T, error) { + if l := len(a); l == 0 { + return nil, NewEmptyResultError(nil) + } + return &a[0], nil +} diff --git a/names/data/names_data.csv b/names/data/names_data.csv index f7a00f2795f..83ab7656ebf 100644 --- a/names/data/names_data.csv +++ b/names/data/names_data.csv @@ -232,7 +232,7 @@ macie,macie,macie,macie,,macie,,,Macie,Macie,,1,,,aws_macie_,,macie_,Macie Class ,,,,,,,,,,,,,,,,,Mainframe Modernization,AWS,x,,,,,,No SDK support managedblockchain,managedblockchain,managedblockchain,managedblockchain,,managedblockchain,,,ManagedBlockchain,ManagedBlockchain,,1,,,aws_managedblockchain_,,managedblockchain_,Managed Blockchain,Amazon,,x,,,,, grafana,grafana,managedgrafana,grafana,,grafana,,managedgrafana;amg,Grafana,ManagedGrafana,,1,,,aws_grafana_,,grafana_,Managed Grafana,Amazon,,,,,,, -kafka,kafka,kafka,kafka,,kafka,,msk,Kafka,Kafka,,1,2,aws_msk_,aws_kafka_,,msk_,Managed Streaming for Kafka,Amazon,,,,,,, +kafka,kafka,kafka,kafka,,kafka,,msk,Kafka,Kafka,x,,2,aws_msk_,aws_kafka_,,msk_,Managed Streaming for Kafka,Amazon,,,,,,, kafkaconnect,kafkaconnect,kafkaconnect,kafkaconnect,,kafkaconnect,,,KafkaConnect,KafkaConnect,,1,,aws_mskconnect_,aws_kafkaconnect_,,mskconnect_,Managed Streaming for Kafka Connect,Amazon,,,,,,, ,,,,,,,,,,,,,,,,,Management Console,AWS,x,,,,,,No SDK support marketplace-catalog,marketplacecatalog,marketplacecatalog,marketplacecatalog,,marketplacecatalog,,,MarketplaceCatalog,MarketplaceCatalog,,1,,,aws_marketplacecatalog_,,marketplace_catalog_,Marketplace Catalog,AWS,,x,,,,, diff --git a/names/names.go b/names/names.go index 021a881c70a..296ae159a64 100644 --- a/names/names.go +++ b/names/names.go @@ -61,6 +61,7 @@ const ( Inspector2EndpointID = "inspector2" InternetMonitorEndpointID = "internetmonitor" IVSChatEndpointID = "ivschat" + KafkaEndpointID = "kafka" KendraEndpointID = "kendra" KeyspacesEndpointID = "keyspaces" KinesisEndpointID = "kinesis" diff --git a/website/docs/d/msk_bootstrap_brokers.html.markdown b/website/docs/d/msk_bootstrap_brokers.html.markdown new file mode 100644 index 00000000000..985df4c9eb7 --- /dev/null +++ b/website/docs/d/msk_bootstrap_brokers.html.markdown @@ -0,0 +1,40 @@ +--- +subcategory: "Managed Streaming for Kafka" +layout: "aws" +page_title: "AWS: aws_msk_bootstrap_brokers" +description: |- + Get a list of brokers that a client application can use to bootstrap. +--- + +# Data Source: aws_msk_bootstrap_brokers + +Get a list of brokers that a client application can use to bootstrap. + +## Example Usage + +```terraform +data "aws_msk_bootstrap_brokers" "example" { + cluster_arn = aws_msk_cluster.example.arn +} +``` + +## Argument Reference + +This data source supports the following arguments: + +* `cluster_arn` - (Required) ARN of the cluster the nodes belong to. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `bootstrap_brokers` - Comma-separated list of one or more hostname:port pairs of Kafka brokers suitable to bootstrap connectivity to the Kafka cluster. +* `bootstrap_brokers_public_sasl_iam` - One or more DNS names (or IP addresses) and SASL IAM port pairs. +* `bootstrap_brokers_public_sasl_scram` - One or more DNS names (or IP addresses) and SASL SCRAM port pairs. +* `bootstrap_brokers_public_tls` - One or more DNS names (or IP addresses) and TLS port pairs. +* `bootstrap_brokers_sasl_iam` - One or more DNS names (or IP addresses) and SASL IAM port pairs.
+* `bootstrap_brokers_sasl_scram` - One or more DNS names (or IP addresses) and SASL SCRAM port pairs. +* `bootstrap_brokers_tls` - One or more DNS names (or IP addresses) and TLS port pairs. +* `bootstrap_brokers_vpc_connectivity_sasl_iam` - One or more DNS names (or IP addresses) and SASL IAM port pairs for VPC connectivity. +* `bootstrap_brokers_vpc_connectivity_sasl_scram` - One or more DNS names (or IP addresses) and SASL SCRAM port pairs for VPC connectivity. +* `bootstrap_brokers_vpc_connectivity_tls` - One or more DNS names (or IP addresses) and TLS port pairs for VPC connectivity.
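As a usage sketch, the exported strings can be referenced like any other data source attribute. The snippet below assumes the `data "aws_msk_bootstrap_brokers" "example"` block from the Example Usage above; the output names are illustrative and not part of the provider documentation.

```terraform
# Minimal sketch (hypothetical output names): expose two of the exported
# bootstrap strings so client tooling can consume them. Assumes the
# data "aws_msk_bootstrap_brokers" "example" block shown in Example Usage.
output "bootstrap_brokers_sasl_iam" {
  value = data.aws_msk_bootstrap_brokers.example.bootstrap_brokers_sasl_iam
}

output "bootstrap_brokers_tls" {
  value = data.aws_msk_bootstrap_brokers.example.bootstrap_brokers_tls
}
```

Each attribute is a single comma-separated string, so it can be passed as-is to Kafka client configuration that expects a `bootstrap.servers`-style value.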