diff --git a/.changelog/38343.txt b/.changelog/38343.txt
new file mode 100644
index 000000000000..176697421454
--- /dev/null
+++ b/.changelog/38343.txt
@@ -0,0 +1,11 @@
+```release-note:bug
+resource/aws_fsx_openzfs_volume: Correctly set `tags` on Read
+```
+
+```release-note:bug
+data-source/aws_fsx_openzfs_snapshot: Correctly set `tags` on Read
+```
+
+```release-note:bug
+data-source/aws_fsx_ontap_storage_virtual_machine: Correctly set `tags` on Read
+```
\ No newline at end of file
diff --git a/go.mod b/go.mod
index 9a6c90dd993a..f99f1406ca08 100644
--- a/go.mod
+++ b/go.mod
@@ -112,6 +112,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/firehose v1.32.0
 	github.com/aws/aws-sdk-go-v2/service/fis v1.26.3
 	github.com/aws/aws-sdk-go-v2/service/fms v1.35.3
+	github.com/aws/aws-sdk-go-v2/service/fsx v1.47.2
 	github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3
 	github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.27.0
 	github.com/aws/aws-sdk-go-v2/service/grafana v1.24.3
diff --git a/go.sum b/go.sum
index bab8bbead3ab..4c2149df981a 100644
--- a/go.sum
+++ b/go.sum
@@ -244,6 +244,8 @@ github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 h1:NwddG0xUTBM2zoq4D8rotQmT2Z/S
 github.com/aws/aws-sdk-go-v2/service/fis v1.26.3/go.mod h1:QmdVf0N/vrhckZLHK4x+f+u9EUuMhetsRgu1rjU1eL0=
 github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 h1:QeYAz3JhpkTxkS+fifDBfmgWFdSRBI21MQzN2bCO1xo=
 github.com/aws/aws-sdk-go-v2/service/fms v1.35.3/go.mod h1:GXASgVouW5X/bmEgOoV/tkzJkp5ib7ZeA+YxMc5piqs=
+github.com/aws/aws-sdk-go-v2/service/fsx v1.47.2 h1:EDZ4UX4c8NJl5Zm2tj1OlbVdNA0wv2xNt55L6g38Va4=
+github.com/aws/aws-sdk-go-v2/service/fsx v1.47.2/go.mod h1:OKCxqzNOd8LpwsIgoWIhjTkDONHuv3uLoObiT/fbS4Q=
 github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3 h1:de8RU808VMx8km6t2wY3WDWigB6GqbNEcyVQRJFaIYs=
 github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3/go.mod h1:F/qjepwnxPHHUTK9ikZp14jLyrvB18kZ/22MmaPxtHE=
 github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.27.0 h1:nlm6tZX8gwsVktDKTQe3IOagNVK1+6CGf9IpdWM6x+E=
diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go
index 1fc533168a75..abe4e8c75abc 100644
--- a/internal/conns/awsclient_gen.go
+++ b/internal/conns/awsclient_gen.go
@@ -104,6 +104,7 @@ import (
 	firehose_sdkv2 "github.com/aws/aws-sdk-go-v2/service/firehose"
 	fis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fis"
 	fms_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fms"
+	fsx_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fsx"
 	glacier_sdkv2 "github.com/aws/aws-sdk-go-v2/service/glacier"
 	globalaccelerator_sdkv2 "github.com/aws/aws-sdk-go-v2/service/globalaccelerator"
 	grafana_sdkv2 "github.com/aws/aws-sdk-go-v2/service/grafana"
@@ -216,7 +217,6 @@ import (
 	ec2_sdkv1 "github.com/aws/aws-sdk-go/service/ec2"
 	elasticsearchservice_sdkv1 "github.com/aws/aws-sdk-go/service/elasticsearchservice"
 	emr_sdkv1 "github.com/aws/aws-sdk-go/service/emr"
-	fsx_sdkv1 "github.com/aws/aws-sdk-go/service/fsx"
 	gamelift_sdkv1 "github.com/aws/aws-sdk-go/service/gamelift"
 	glue_sdkv1 "github.com/aws/aws-sdk-go/service/glue"
 	guardduty_sdkv1 "github.com/aws/aws-sdk-go/service/guardduty"
@@ -670,8 +670,8 @@ func (c *AWSClient) FMSClient(ctx context.Context) *fms_sdkv2.Client {
 	return errs.Must(client[*fms_sdkv2.Client](ctx, c, names.FMS, make(map[string]any)))
 }
 
-func (c *AWSClient) FSxConn(ctx context.Context) *fsx_sdkv1.FSx {
-	return errs.Must(conn[*fsx_sdkv1.FSx](ctx, c, names.FSx, make(map[string]any)))
+func (c *AWSClient) FSxClient(ctx context.Context) *fsx_sdkv2.Client {
+	return errs.Must(client[*fsx_sdkv2.Client](ctx, c, names.FSx, make(map[string]any)))
 }
 
 func (c *AWSClient) FinSpaceClient(ctx context.Context) *finspace_sdkv2.Client {
diff --git a/internal/service/fsx/backup.go b/internal/service/fsx/backup.go
index 1d1cef89baa3..5741794bc63e 100644
--- a/internal/service/fsx/backup.go
+++ b/internal/service/fsx/backup.go
@@ -8,15 +8,17 @@ import (
 	"log"
 	"time"
 
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/fsx"
-	"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/fsx"
+	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	"github.com/hashicorp/terraform-provider-aws/internal/enum"
+	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
 	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
@@ -82,7 +84,7 @@ func resourceBackup() *schema.Resource {
 
 func resourceBackupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	input := &fsx.CreateBackupInput{
 		ClientRequestToken: aws.String(id.UniqueId()),
@@ -105,13 +107,13 @@ func resourceBackupCreate(ctx context.Context, d *schema.ResourceData, meta inte
 		return sdkdiag.AppendErrorf(diags, "creating FSx Backup: %s", "can only specify either file_system_id or volume_id")
 	}
 
-	output, err := conn.CreateBackupWithContext(ctx, input)
+	output, err := conn.CreateBackup(ctx, input)
 
 	if err != nil {
 		return sdkdiag.AppendErrorf(diags, "creating FSx Backup: %s", err)
 	}
 
-	d.SetId(aws.StringValue(output.Backup.BackupId))
+	d.SetId(aws.ToString(output.Backup.BackupId))
 
 	if _, err := waitBackupAvailable(ctx, conn, d.Id()); err != nil {
 		return sdkdiag.AppendErrorf(diags, "waiting for FSx Backup (%s) create: %s", d.Id(), err)
@@ -122,7 +124,7 @@ func resourceBackupCreate(ctx context.Context, d *schema.ResourceData, meta inte
 
 func resourceBackupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	backup, err := findBackupByID(ctx, conn, d.Id())
 
@@ -162,14 +164,14 @@ func resourceBackupUpdate(ctx context.Context, d *schema.ResourceData, meta inte
 
 func resourceBackupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	log.Printf("[INFO] Deleting FSx Backup: %s", d.Id())
-	_, err := conn.DeleteBackupWithContext(ctx, &fsx.DeleteBackupInput{
+	_, err := conn.DeleteBackup(ctx, &fsx.DeleteBackupInput{
 		BackupId: aws.String(d.Id()),
 	})
 
-	if tfawserr.ErrCodeEquals(err, fsx.ErrCodeBackupNotFound) {
+	if errs.IsA[*awstypes.BackupNotFound](err) {
 		return diags
 	}
 
@@ -184,56 +186,53 @@ func resourceBackupDelete(ctx context.Context, d *schema.ResourceData, meta inte
 	return diags
 }
 
-func findBackupByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Backup, error) {
+func findBackupByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.Backup, error) {
 	input := &fsx.DescribeBackupsInput{
-		BackupIds: aws.StringSlice([]string{id}),
+		BackupIds: []string{id},
 	}
 
-	return findBackup(ctx, conn, input, tfslices.PredicateTrue[*fsx.Backup]())
+	return findBackup(ctx, conn, input, tfslices.PredicateTrue[*awstypes.Backup]())
 }
 
-func findBackup(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeBackupsInput, filter tfslices.Predicate[*fsx.Backup]) (*fsx.Backup, error) {
+func findBackup(ctx context.Context, conn *fsx.Client, input *fsx.DescribeBackupsInput, filter tfslices.Predicate[*awstypes.Backup]) (*awstypes.Backup, error) {
 	output, err := findBackups(ctx, conn, input, filter)
 
 	if err != nil {
 		return nil, err
 	}
 
-	return tfresource.AssertSinglePtrResult(output)
+	return tfresource.AssertSingleValueResult(output)
 }
 
-func findBackups(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeBackupsInput, filter tfslices.Predicate[*fsx.Backup]) ([]*fsx.Backup, error) {
-	var output []*fsx.Backup
+func findBackups(ctx context.Context, conn *fsx.Client, input *fsx.DescribeBackupsInput, filter tfslices.Predicate[*awstypes.Backup]) ([]awstypes.Backup, error) {
+	var output []awstypes.Backup
 
-	err := conn.DescribeBackupsPagesWithContext(ctx, input, func(page *fsx.DescribeBackupsOutput, lastPage bool) bool {
-		if page == nil {
-			return !lastPage
-		}
+	pages := fsx.NewDescribeBackupsPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)
 
-		for _, v := range page.Backups {
-			if v != nil && filter(v) {
-				output = append(output, v)
+		if errs.IsA[*awstypes.FileSystemNotFound](err) || errs.IsA[*awstypes.BackupNotFound](err) {
+			return nil, &retry.NotFoundError{
+				LastError:   err,
+				LastRequest: input,
 			}
 		}
 
-		return !lastPage
-	})
-
-	if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) || tfawserr.ErrCodeEquals(err, fsx.ErrCodeBackupNotFound) {
-		return nil, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
+		if err != nil {
+			return nil, err
 		}
-	}
 
-	if err != nil {
-		return nil, err
+		for _, v := range page.Backups {
+			if filter(&v) {
+				output = append(output, v)
+			}
+		}
 	}
 
 	return output, nil
 }
 
-func statusBackup(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc {
+func statusBackup(ctx context.Context, conn *fsx.Client, id string) retry.StateRefreshFunc {
 	return func() (interface{}, string, error) {
 		output, err := findBackupByID(ctx, conn, id)
 
@@ -245,36 +244,36 @@ func statusBackup(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefr
 			return nil, "", err
 		}
 
-		return output, aws.StringValue(output.Lifecycle), nil
+		return output, string(output.Lifecycle), nil
 	}
 }
 
-func waitBackupAvailable(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Backup, error) {
+func waitBackupAvailable(ctx context.Context, conn *fsx.Client, id string) (*awstypes.Backup, error) {
 	const (
 		timeout = 10 * time.Minute
 	)
 	stateConf := &retry.StateChangeConf{
-		Pending: []string{fsx.BackupLifecycleCreating, fsx.BackupLifecyclePending, fsx.BackupLifecycleTransferring},
-		Target:  []string{fsx.BackupLifecycleAvailable},
+		Pending: enum.Slice(awstypes.BackupLifecycleCreating, awstypes.BackupLifecyclePending, awstypes.BackupLifecycleTransferring),
+		Target:  enum.Slice(awstypes.BackupLifecycleAvailable),
 		Refresh: statusBackup(ctx, conn, id),
 		Timeout: timeout,
 	}
 
	outputRaw, err := stateConf.WaitForStateContext(ctx)
 
-	if output, ok := outputRaw.(*fsx.Backup); ok {
+	if output, ok := outputRaw.(*awstypes.Backup); ok {
 		return output, err
 	}
 
 	return nil, err
 }
 
-func waitBackupDeleted(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Backup, error) {
+func waitBackupDeleted(ctx context.Context, conn *fsx.Client, id string) (*awstypes.Backup, error) {
 	const (
 		timeout = 10 * time.Minute
 	)
 	stateConf := &retry.StateChangeConf{
-		Pending: []string{fsx.FileSystemLifecycleDeleting},
+		Pending: enum.Slice(awstypes.FileSystemLifecycleDeleting),
 		Target:  []string{},
 		Refresh: statusBackup(ctx, conn, id),
 		Timeout: timeout,
@@ -282,7 +281,7 @@ func waitBackupDeleted(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Back
 
 	outputRaw, err := stateConf.WaitForStateContext(ctx)
 
-	if output, ok := outputRaw.(*fsx.Backup); ok {
+	if output, ok := outputRaw.(*awstypes.Backup); ok {
 		return output, err
 	}
 
 	return nil, err
}
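A recurring pattern in this migration, worth calling out once: AWS SDK for Go v2 drops the v1 `...PagesWithContext` callback helpers in favor of explicit paginator types, and modeled exceptions become concrete error types. The following standalone sketch is illustrative only, not part of the patch (the client setup and printing are assumptions); it shows the paginator idiom that `findBackups` above now uses:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := fsx.NewFromConfig(cfg)

	// v2 replaces conn.DescribeBackupsPagesWithContext(ctx, input, callback)
	// with a paginator object; each NextPage call returns one page or an error.
	pages := fsx.NewDescribeBackupsPaginator(client, &fsx.DescribeBackupsInput{})
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range page.Backups {
			// Slice elements are value types in v2 ([]types.Backup rather than
			// []*fsx.Backup), and aws.ToString replaces the v1 aws.StringValue.
			fmt.Println(aws.ToString(b.BackupId), string(b.Lifecycle))
		}
	}
}
```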
diff --git a/internal/service/fsx/backup_test.go b/internal/service/fsx/backup_test.go
index 9d20a14e91b7..2238fae7b5a8 100644
--- a/internal/service/fsx/backup_test.go
+++ b/internal/service/fsx/backup_test.go
@@ -10,7 +10,7 @@ import (
 	"testing"
 
 	"github.com/YakDriver/regexache"
-	"github.com/aws/aws-sdk-go/service/fsx"
+	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
 	sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/hashicorp/terraform-plugin-testing/terraform"
@@ -23,12 +23,12 @@ import (
 
 func TestAccFSxBackup_basic(t *testing.T) {
 	ctx := acctest.Context(t)
-	var backup fsx.Backup
+	var backup awstypes.Backup
 	resourceName := "aws_fsx_backup.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBackupDestroy(ctx),
@@ -53,14 +53,14 @@ func TestAccFSxBackup_basic(t *testing.T) {
 
 func TestAccFSxBackup_ontapBasic(t *testing.T) {
 	ctx := acctest.Context(t)
-	var backup fsx.Backup
+	var backup awstypes.Backup
 	resourceName := "aws_fsx_backup.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	//FSX ONTAP Volume Names can't use dash only underscore
 	vName := strings.Replace(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), "-", "_", -1)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBackupDestroy(ctx),
@@ -85,12 +85,12 @@ func TestAccFSxBackup_ontapBasic(t *testing.T) {
 
 func TestAccFSxBackup_openzfsBasic(t *testing.T) {
 	ctx := acctest.Context(t)
-	var backup fsx.Backup
+	var backup awstypes.Backup
 	resourceName := "aws_fsx_backup.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBackupDestroy(ctx),
@@ -115,13 +115,13 @@ func TestAccFSxBackup_windowsBasic(t *testing.T) {
 	ctx := acctest.Context(t)
-	var backup fsx.Backup
+	var backup awstypes.Backup
 	resourceName := "aws_fsx_backup.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBackupDestroy(ctx),
@@ -146,12 +146,12 @@ func TestAccFSxBackup_disappears(t *testing.T) {
 	ctx := acctest.Context(t)
-	var backup fsx.Backup
+	var backup awstypes.Backup
 	resourceName := "aws_fsx_backup.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBackupDestroy(ctx),
@@ -170,12 +170,12 @@ func TestAccFSxBackup_Disappears_filesystem(t *testing.T) {
 	ctx := acctest.Context(t)
-	var backup fsx.Backup
+	var backup awstypes.Backup
 	resourceName := "aws_fsx_backup.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBackupDestroy(ctx),
@@ -194,12 +194,12 @@ func TestAccFSxBackup_tags(t *testing.T) {
 	ctx := acctest.Context(t)
-	var backup fsx.Backup
+	var backup awstypes.Backup
 	resourceName := "aws_fsx_backup.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBackupDestroy(ctx),
@@ -240,12 +240,12 @@ func TestAccFSxBackup_implicitTags(t *testing.T) {
 	ctx := acctest.Context(t)
-	var backup fsx.Backup
+	var backup awstypes.Backup
 	resourceName := "aws_fsx_backup.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckBackupDestroy(ctx),
@@ -267,14 +267,14 @@ func TestAccFSxBackup_implicitTags(t *testing.T) {
 	})
 }
 
-func testAccCheckBackupExists(ctx context.Context, n string, v *fsx.Backup) resource.TestCheckFunc {
+func testAccCheckBackupExists(ctx context.Context, n string, v *awstypes.Backup) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[n]
 		if !ok {
 			return fmt.Errorf("Not found: %s", n)
 		}
 
-		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)
 
 		output, err := tffsx.FindBackupByID(ctx, conn, rs.Primary.ID)
 
@@ -290,7 +290,7 @@ func testAccCheckBackupExists(ctx context.Context, n string, v *fsx.Backup) reso
 
 func testAccCheckBackupDestroy(ctx context.Context) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)
 
 		for _, rs := range s.RootModule().Resources {
 			if rs.Type != "aws_fsx_backup" {
diff --git a/internal/service/fsx/common_schema_data_source.go b/internal/service/fsx/common_schema_data_source.go
index 07fb7127dd6f..473b19c88160 100644
--- a/internal/service/fsx/common_schema_data_source.go
+++ b/internal/service/fsx/common_schema_data_source.go
@@ -4,24 +4,23 @@
 package fsx
 
 import (
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/fsx"
+	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-provider-aws/internal/flex"
 	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	"github.com/hashicorp/terraform-provider-aws/names"
 )
 
-func newSnapshotFilterList(s *schema.Set) []*fsx.SnapshotFilter {
+func newSnapshotFilterList(s *schema.Set) []awstypes.SnapshotFilter {
 	if s == nil {
-		return []*fsx.SnapshotFilter{}
+		return []awstypes.SnapshotFilter{}
 	}
 
-	return tfslices.ApplyToAll(s.List(), func(tfList interface{}) *fsx.SnapshotFilter {
+	return tfslices.ApplyToAll(s.List(), func(tfList interface{}) awstypes.SnapshotFilter {
 		tfMap := tfList.(map[string]interface{})
-		return &fsx.SnapshotFilter{
-			Name:   aws.String(tfMap[names.AttrName].(string)),
-			Values: flex.ExpandStringList(tfMap[names.AttrValues].([]interface{})),
+		return awstypes.SnapshotFilter{
+			Name:   awstypes.SnapshotFilterName(tfMap[names.AttrName].(string)),
+			Values: flex.ExpandStringValueList(tfMap[names.AttrValues].([]interface{})),
 		}
 	})
 }
@@ -48,16 +47,16 @@ func snapshotFiltersSchema() *schema.Schema {
 	}
 }
 
-func newStorageVirtualMachineFilterList(s *schema.Set) []*fsx.StorageVirtualMachineFilter {
+func newStorageVirtualMachineFilterList(s *schema.Set) []awstypes.StorageVirtualMachineFilter {
 	if s == nil {
-		return []*fsx.StorageVirtualMachineFilter{}
+		return []awstypes.StorageVirtualMachineFilter{}
 	}
 
-	return tfslices.ApplyToAll(s.List(), func(tfList interface{}) *fsx.StorageVirtualMachineFilter {
+	return tfslices.ApplyToAll(s.List(), func(tfList interface{}) awstypes.StorageVirtualMachineFilter {
 		tfMap := tfList.(map[string]interface{})
-		return &fsx.StorageVirtualMachineFilter{
-			Name:   aws.String(tfMap[names.AttrName].(string)),
-			Values: flex.ExpandStringList(tfMap[names.AttrValues].([]interface{})),
+		return awstypes.StorageVirtualMachineFilter{
+			Name:   awstypes.StorageVirtualMachineFilterName(tfMap[names.AttrName].(string)),
+			Values: flex.ExpandStringValueList(tfMap[names.AttrValues].([]interface{})),
 		}
 	})
}
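The filter helpers above show the shape change that drives most of this diff: request structs move from pointer-heavy v1 types (`*fsx.SnapshotFilter`, `[]*string`, `aws.String`) to v2 value types with typed enum fields and plain string slices. A minimal sketch of constructing the same filter directly, outside the provider's `flex`/`tfslices` helpers (the enum constant is from the generated `fsx/types` package; the volume ID is hypothetical):

```go
package main

import (
	"fmt"

	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

func main() {
	// SDK v1: &fsx.SnapshotFilter{Name: aws.String("volume-id"),
	//         Values: []*string{aws.String(id)}}
	// SDK v2: a value type; Name is the SnapshotFilterName enum, Values a []string.
	filter := awstypes.SnapshotFilter{
		Name:   awstypes.SnapshotFilterNameVolumeId,
		Values: []string{"fsvol-0123456789abcdef0"}, // hypothetical volume ID
	}
	fmt.Printf("%s = %v\n", filter.Name, filter.Values)
}
```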
diff --git a/internal/service/fsx/data_repository_association.go b/internal/service/fsx/data_repository_association.go
index 04c75aaebab2..3ad97e9a96ad 100644
--- a/internal/service/fsx/data_repository_association.go
+++ b/internal/service/fsx/data_repository_association.go
@@ -10,9 +10,9 @@ import (
 	"time"
 
 	"github.com/YakDriver/regexache"
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/fsx"
-	"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/fsx"
+	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
@@ -20,6 +20,8 @@ import (
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	"github.com/hashicorp/terraform-provider-aws/internal/enum"
+	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
 	"github.com/hashicorp/terraform-provider-aws/internal/flex"
 	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
@@ -120,8 +122,8 @@ func resourceDataRepositoryAssociation() *schema.Resource {
 							Optional: true,
 							Computed: true,
 							Elem: &schema.Schema{
-								Type:         schema.TypeString,
-								ValidateFunc: validation.StringInSlice(fsx.EventType_Values(), false),
+								Type:             schema.TypeString,
+								ValidateDiagFunc: enum.Validate[awstypes.EventType](),
 							},
 						},
 					},
@@ -140,8 +142,8 @@ func resourceDataRepositoryAssociation() *schema.Resource {
 							Optional: true,
 							Computed: true,
 							Elem: &schema.Schema{
-								Type:         schema.TypeString,
-								ValidateFunc: validation.StringInSlice(fsx.EventType_Values(), false),
+								Type:             schema.TypeString,
+								ValidateDiagFunc: enum.Validate[awstypes.EventType](),
 							},
 						},
 					},
@@ -163,7 +165,7 @@ func resourceDataRepositoryAssociation() *schema.Resource {
 
 func resourceDataRepositoryAssociationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	input := &fsx.CreateDataRepositoryAssociationInput{
 		ClientRequestToken: aws.String(id.UniqueId()),
@@ -178,20 +180,20 @@ func resourceDataRepositoryAssociationCreate(ctx context.Context, d *schema.Reso
 	}
 
 	if v, ok := d.GetOk("imported_file_chunk_size"); ok {
-		input.ImportedFileChunkSize = aws.Int64(int64(v.(int)))
+		input.ImportedFileChunkSize = aws.Int32(int32(v.(int)))
 	}
 
 	if v, ok := d.GetOk("s3"); ok {
 		input.S3 = expandDataRepositoryAssociationS3(v.([]interface{}))
 	}
 
-	output, err := conn.CreateDataRepositoryAssociationWithContext(ctx, input)
+	output, err := conn.CreateDataRepositoryAssociation(ctx, input)
 
 	if err != nil {
 		return sdkdiag.AppendErrorf(diags, "creating FSx for Lustre Data Repository Association: %s", err)
 	}
 
-	d.SetId(aws.StringValue(output.Association.AssociationId))
+	d.SetId(aws.ToString(output.Association.AssociationId))
 
 	if _, err := waitDataRepositoryAssociationCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil {
 		return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre Data Repository Association (%s) create: %s", d.Id(), err)
@@ -202,7 +204,7 @@ func resourceDataRepositoryAssociationCreate(ctx context.Context, d *schema.Reso
 
 func resourceDataRepositoryAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	association, err := findDataRepositoryAssociationByID(ctx, conn, d.Id())
 
@@ -233,7 +235,7 @@ func resourceDataRepositoryAssociationRead(ctx context.Context, d *schema.Resour
 
 func resourceDataRepositoryAssociationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) {
 		input := &fsx.UpdateDataRepositoryAssociationInput{
@@ -242,14 +244,14 @@ func resourceDataRepositoryAssociationUpdate(ctx context.Context, d *schema.Reso
 		}
 
 		if d.HasChange("imported_file_chunk_size") {
-			input.ImportedFileChunkSize = aws.Int64(int64(d.Get("imported_file_chunk_size").(int)))
+			input.ImportedFileChunkSize = aws.Int32(int32(d.Get("imported_file_chunk_size").(int)))
 		}
 
 		if d.HasChange("s3") {
 			input.S3 = expandDataRepositoryAssociationS3(d.Get("s3").([]interface{}))
 		}
 
-		_, err := conn.UpdateDataRepositoryAssociationWithContext(ctx, input)
+		_, err := conn.UpdateDataRepositoryAssociation(ctx, input)
 
 		if err != nil {
 			return sdkdiag.AppendErrorf(diags, "updating FSx for Lustre Data Repository Association (%s): %s", d.Id(), err)
@@ -265,7 +267,7 @@ func resourceDataRepositoryAssociationUpdate(ctx context.Context, d *schema.Reso
 
 func resourceDataRepositoryAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	request := &fsx.DeleteDataRepositoryAssociationInput{
 		AssociationId: aws.String(d.Id()),
@@ -274,9 +276,9 @@ func resourceDataRepositoryAssociationDelete(ctx context.Context, d *schema.Reso
 	}
 
 	log.Printf("[DEBUG] Deleting FSx for Lustre Data Repository Association: %s", d.Id())
-	_, err := conn.DeleteDataRepositoryAssociationWithContext(ctx, request)
+	_, err := conn.DeleteDataRepositoryAssociation(ctx, request)
 
-	if tfawserr.ErrCodeEquals(err, fsx.ErrCodeDataRepositoryAssociationNotFound) {
+	if errs.IsA[*awstypes.DataRepositoryAssociationNotFound](err) {
 		return diags
 	}
 
@@ -291,147 +293,53 @@ func resourceDataRepositoryAssociationDelete(ctx context.Context, d *schema.Reso
 	return diags
 }
 
-func expandDataRepositoryAssociationS3(cfg []interface{}) *fsx.S3DataRepositoryConfiguration {
-	if len(cfg) == 0 || cfg[0] == nil {
-		return nil
-	}
-
-	m := cfg[0].(map[string]interface{})
-
-	s3Config := &fsx.S3DataRepositoryConfiguration{}
-
-	if v, ok := m["auto_export_policy"]; ok {
-		policy := v.([]interface{})
-		s3Config.AutoExportPolicy = expandDataRepositoryAssociationS3AutoExportPolicy(policy)
-	}
-	if v, ok := m["auto_import_policy"]; ok {
-		policy := v.([]interface{})
-		s3Config.AutoImportPolicy = expandDataRepositoryAssociationS3AutoImportPolicy(policy)
-	}
-
-	return s3Config
-}
-
-func expandDataRepositoryAssociationS3AutoExportPolicy(policy []interface{}) *fsx.AutoExportPolicy {
-	if len(policy) == 0 || policy[0] == nil {
-		return nil
-	}
-
-	m := policy[0].(map[string]interface{})
-	autoExportPolicy := &fsx.AutoExportPolicy{}
-
-	if v, ok := m["events"]; ok {
-		autoExportPolicy.Events = flex.ExpandStringList(v.([]interface{}))
-	}
-
-	return autoExportPolicy
-}
-
-func expandDataRepositoryAssociationS3AutoImportPolicy(policy []interface{}) *fsx.AutoImportPolicy {
-	if len(policy) == 0 || policy[0] == nil {
-		return nil
-	}
-
-	m := policy[0].(map[string]interface{})
-	autoImportPolicy := &fsx.AutoImportPolicy{}
-
-	if v, ok := m["events"]; ok {
-		autoImportPolicy.Events = flex.ExpandStringList(v.([]interface{}))
-	}
-
-	return autoImportPolicy
-}
-
-func flattenDataRepositoryAssociationS3(s3Config *fsx.S3DataRepositoryConfiguration) []map[string]interface{} {
-	result := make(map[string]interface{})
-	if s3Config == nil {
-		return []map[string]interface{}{result}
-	}
-
-	if s3Config.AutoExportPolicy != nil {
-		result["auto_export_policy"] = flattenS3AutoExportPolicy(s3Config.AutoExportPolicy)
-	}
-	if s3Config.AutoImportPolicy != nil {
-		result["auto_import_policy"] = flattenS3AutoImportPolicy(s3Config.AutoImportPolicy)
-	}
-
-	return []map[string]interface{}{result}
-}
-
-func flattenS3AutoExportPolicy(policy *fsx.AutoExportPolicy) []map[string][]interface{} {
-	result := make(map[string][]interface{})
-	if policy == nil {
-		return []map[string][]interface{}{result}
-	}
-	if policy.Events != nil {
-		result["events"] = flex.FlattenStringList(policy.Events)
-	}
-
-	return []map[string][]interface{}{result}
-}
-
-func flattenS3AutoImportPolicy(policy *fsx.AutoImportPolicy) []map[string][]interface{} {
-	result := make(map[string][]interface{})
-	if policy == nil {
-		return []map[string][]interface{}{result}
-	}
-	if policy.Events != nil {
-		result["events"] = flex.FlattenStringList(policy.Events)
-	}
-
-	return []map[string][]interface{}{result}
-}
-
-func findDataRepositoryAssociationByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.DataRepositoryAssociation, error) {
+func findDataRepositoryAssociationByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.DataRepositoryAssociation, error) {
 	input := &fsx.DescribeDataRepositoryAssociationsInput{
-		AssociationIds: aws.StringSlice([]string{id}),
+		AssociationIds: []string{id},
 	}
 
-	return findDataRepositoryAssociation(ctx, conn, input, tfslices.PredicateTrue[*fsx.DataRepositoryAssociation]())
+	return findDataRepositoryAssociation(ctx, conn, input, tfslices.PredicateTrue[*awstypes.DataRepositoryAssociation]())
 }
 
-func findDataRepositoryAssociation(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeDataRepositoryAssociationsInput, filter tfslices.Predicate[*fsx.DataRepositoryAssociation]) (*fsx.DataRepositoryAssociation, error) {
+func findDataRepositoryAssociation(ctx context.Context, conn *fsx.Client, input *fsx.DescribeDataRepositoryAssociationsInput, filter tfslices.Predicate[*awstypes.DataRepositoryAssociation]) (*awstypes.DataRepositoryAssociation, error) {
 	output, err := findDataRepositoryAssociations(ctx, conn, input, filter)
 
 	if err != nil {
 		return nil, err
 	}
 
-	return tfresource.AssertSinglePtrResult(output)
+	return tfresource.AssertSingleValueResult(output)
 }
 
-func findDataRepositoryAssociations(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeDataRepositoryAssociationsInput, filter tfslices.Predicate[*fsx.DataRepositoryAssociation]) ([]*fsx.DataRepositoryAssociation, error) {
-	var output []*fsx.DataRepositoryAssociation
+func findDataRepositoryAssociations(ctx context.Context, conn *fsx.Client, input *fsx.DescribeDataRepositoryAssociationsInput, filter tfslices.Predicate[*awstypes.DataRepositoryAssociation]) ([]awstypes.DataRepositoryAssociation, error) {
+	var output []awstypes.DataRepositoryAssociation
 
-	err := conn.DescribeDataRepositoryAssociationsPagesWithContext(ctx, input, func(page *fsx.DescribeDataRepositoryAssociationsOutput, lastPage bool) bool {
-		if page == nil {
-			return !lastPage
-		}
+	pages := fsx.NewDescribeDataRepositoryAssociationsPaginator(conn, input)
+	for pages.HasMorePages() {
+		page, err := pages.NextPage(ctx)
 
-		for _, v := range page.Associations {
-			if v != nil && filter(v) {
-				output = append(output, v)
+		if errs.IsA[*awstypes.DataRepositoryAssociationNotFound](err) {
+			return nil, &retry.NotFoundError{
+				LastError:   err,
+				LastRequest: input,
 			}
 		}
 
-		return !lastPage
-	})
-
-	if tfawserr.ErrCodeEquals(err, fsx.ErrCodeDataRepositoryAssociationNotFound) {
-		return nil, &retry.NotFoundError{
-			LastError:   err,
-			LastRequest: input,
+		if err != nil {
+			return nil, err
 		}
-	}
 
-	if err != nil {
-		return nil, err
+		for _, v := range page.Associations {
+			if filter(&v) {
+				output = append(output, v)
+			}
+		}
 	}
 
 	return output, nil
 }
 
-func statusDataRepositoryAssociation(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc {
+func statusDataRepositoryAssociation(ctx context.Context, conn *fsx.Client, id string) retry.StateRefreshFunc {
 	return func() (interface{}, string, error) {
 		output, err := findDataRepositoryAssociationByID(ctx, conn, id)
 
@@ -443,14 +351,14 @@ func statusDataRepositoryAssociation(ctx context.Context, conn *fsx.FSx, id stri
 			return nil, "", err
 		}
 
-		return output, aws.StringValue(output.Lifecycle), nil
+		return output, string(output.Lifecycle), nil
 	}
 }
 
-func waitDataRepositoryAssociationCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.DataRepositoryAssociation, error) {
+func waitDataRepositoryAssociationCreated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.DataRepositoryAssociation, error) {
 	stateConf := &retry.StateChangeConf{
-		Pending: []string{fsx.DataRepositoryLifecycleCreating},
-		Target:  []string{fsx.DataRepositoryLifecycleAvailable},
+		Pending: enum.Slice(awstypes.DataRepositoryLifecycleCreating),
+		Target:  enum.Slice(awstypes.DataRepositoryLifecycleAvailable),
 		Refresh: statusDataRepositoryAssociation(ctx, conn, id),
 		Timeout: timeout,
 		Delay:   30 * time.Second,
@@ -458,9 +366,9 @@ func waitDataRepositoryAssociationCreated(ctx context.Context, conn *fsx.FSx, id
 
 	outputRaw, err := stateConf.WaitForStateContext(ctx)
 
-	if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok {
-		if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil {
-			tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message)))
+	if output, ok := outputRaw.(*awstypes.DataRepositoryAssociation); ok {
+		if status, details := output.Lifecycle, output.FailureDetails; status == awstypes.DataRepositoryLifecycleFailed && details != nil {
+			tfresource.SetLastError(err, errors.New(aws.ToString(output.FailureDetails.Message)))
 		}
 
 		return output, err
@@ -469,10 +377,10 @@ func waitDataRepositoryAssociationCreated(ctx context.Context, conn *fsx.FSx, id
 	return nil, err
 }
 
-func waitDataRepositoryAssociationUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.DataRepositoryAssociation, error) {
+func waitDataRepositoryAssociationUpdated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.DataRepositoryAssociation, error) {
 	stateConf := &retry.StateChangeConf{
-		Pending: []string{fsx.DataRepositoryLifecycleUpdating},
-		Target:  []string{fsx.DataRepositoryLifecycleAvailable},
+		Pending: enum.Slice(awstypes.DataRepositoryLifecycleUpdating),
+		Target:  enum.Slice(awstypes.DataRepositoryLifecycleAvailable),
 		Refresh: statusDataRepositoryAssociation(ctx, conn, id),
 		Timeout: timeout,
 		Delay:   30 * time.Second,
@@ -480,9 +388,9 @@ func waitDataRepositoryAssociationUpdated(ctx context.Context, conn *fsx.FSx, id
 
 	outputRaw, err := stateConf.WaitForStateContext(ctx)
 
-	if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok {
-		if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil {
-			tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message)))
+	if output, ok := outputRaw.(*awstypes.DataRepositoryAssociation); ok {
+		if status, details := output.Lifecycle, output.FailureDetails; status == awstypes.DataRepositoryLifecycleFailed && details != nil {
+			tfresource.SetLastError(err, errors.New(aws.ToString(output.FailureDetails.Message)))
 		}
 
 		return output, err
@@ -491,9 +399,9 @@ func waitDataRepositoryAssociationUpdated(ctx context.Context, conn *fsx.FSx, id
 	return nil, err
 }
 
-func waitDataRepositoryAssociationDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.DataRepositoryAssociation, error) {
+func waitDataRepositoryAssociationDeleted(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.DataRepositoryAssociation, error) {
 	stateConf := &retry.StateChangeConf{
-		Pending: []string{fsx.DataRepositoryLifecycleAvailable, fsx.DataRepositoryLifecycleDeleting},
+		Pending: enum.Slice(awstypes.DataRepositoryLifecycleAvailable, awstypes.DataRepositoryLifecycleDeleting),
 		Target:  []string{},
 		Refresh: statusDataRepositoryAssociation(ctx, conn, id),
 		Timeout: timeout,
@@ -502,9 +410,9 @@ func waitDataRepositoryAssociationDeleted(ctx context.Context, conn *fsx.FSx, id
 
 	outputRaw, err := stateConf.WaitForStateContext(ctx)
 
-	if output, ok := outputRaw.(*fsx.DataRepositoryAssociation); ok {
-		if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.DataRepositoryLifecycleFailed && details != nil {
-			tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message)))
+	if output, ok := outputRaw.(*awstypes.DataRepositoryAssociation); ok {
+		if status, details := output.Lifecycle, output.FailureDetails; status == awstypes.DataRepositoryLifecycleFailed && details != nil {
+			tfresource.SetLastError(err, errors.New(aws.ToString(output.FailureDetails.Message)))
 		}
 
 		return output, err
@@ -512,3 +420,94 @@ func waitDataRepositoryAssociationDeleted(ctx context.Context, conn *fsx.FSx, id
 
 	return nil, err
 }
+
+func expandDataRepositoryAssociationS3(cfg []interface{}) *awstypes.S3DataRepositoryConfiguration {
+	if len(cfg) == 0 || cfg[0] == nil {
+		return nil
+	}
+
+	m := cfg[0].(map[string]interface{})
+
+	s3Config := &awstypes.S3DataRepositoryConfiguration{}
+
+	if v, ok := m["auto_export_policy"]; ok {
+		policy := v.([]interface{})
+		s3Config.AutoExportPolicy = expandDataRepositoryAssociationS3AutoExportPolicy(policy)
+	}
+	if v, ok := m["auto_import_policy"]; ok {
+		policy := v.([]interface{})
+		s3Config.AutoImportPolicy = expandDataRepositoryAssociationS3AutoImportPolicy(policy)
+	}
+
+	return s3Config
+}
+
+func expandDataRepositoryAssociationS3AutoExportPolicy(policy []interface{}) *awstypes.AutoExportPolicy {
+	if len(policy) == 0 || policy[0] == nil {
+		return nil
+	}
+
+	m := policy[0].(map[string]interface{})
+	autoExportPolicy := &awstypes.AutoExportPolicy{}
+
+	if v, ok := m["events"]; ok {
+		autoExportPolicy.Events = flex.ExpandStringyValueList[awstypes.EventType](v.([]interface{}))
+	}
+
+	return autoExportPolicy
+}
+
+func expandDataRepositoryAssociationS3AutoImportPolicy(policy []interface{}) *awstypes.AutoImportPolicy {
+	if len(policy) == 0 || policy[0] == nil {
+		return nil
+	}
+
+	m := policy[0].(map[string]interface{})
+	autoImportPolicy := &awstypes.AutoImportPolicy{}
+
+	if v, ok := m["events"]; ok {
+		autoImportPolicy.Events = flex.ExpandStringyValueList[awstypes.EventType](v.([]interface{}))
+	}
+
+	return autoImportPolicy
+}
+
+func flattenDataRepositoryAssociationS3(s3Config *awstypes.S3DataRepositoryConfiguration) []map[string]interface{} {
+	result := make(map[string]interface{})
+	if s3Config == nil {
+		return []map[string]interface{}{result}
+	}
+
+	if s3Config.AutoExportPolicy != nil {
+		result["auto_export_policy"] = flattenS3AutoExportPolicy(s3Config.AutoExportPolicy)
+	}
+	if s3Config.AutoImportPolicy != nil {
+		result["auto_import_policy"] = flattenS3AutoImportPolicy(s3Config.AutoImportPolicy)
+	}
+
+	return []map[string]interface{}{result}
+}
+
+func flattenS3AutoExportPolicy(policy *awstypes.AutoExportPolicy) []map[string][]interface{} {
+	result := make(map[string][]interface{})
+	if policy == nil {
+		return []map[string][]interface{}{result}
+	}
+	if policy.Events != nil {
+		result["events"] = flex.FlattenStringyValueList(policy.Events)
+	}
+
+	return []map[string][]interface{}{result}
+}
+
+func flattenS3AutoImportPolicy(policy *awstypes.AutoImportPolicy) []map[string][]interface{} {
+	result := make(map[string][]interface{})
+	if policy == nil {
+		return []map[string][]interface{}{result}
+	}
+	if policy.Events != nil {
+		result["events"] = flex.FlattenStringyValueList(policy.Events)
+	}
+
+	return []map[string][]interface{}{result}
+}
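The NotFound handling above is the other half of the migration pattern: v1 error-code string matching (`tfawserr.ErrCodeEquals(err, fsx.ErrCodeDataRepositoryAssociationNotFound)`) becomes a typed check, because SDK v2 models each exception as a concrete Go type. The provider's internal `errs.IsA[T]` is essentially a generic wrapper over the standard library's `errors.As`; a self-contained sketch of the equivalent check (the `isA` helper is illustrative, not the provider's implementation):

```go
package main

import (
	"errors"
	"fmt"

	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

// isA reports whether err's chain contains an error of concrete type T,
// approximating what the provider's internal errs.IsA[T] helper does.
func isA[T error](err error) bool {
	var target T
	return errors.As(err, &target)
}

func main() {
	// SDK v2 exceptions like DataRepositoryAssociationNotFound satisfy error,
	// so they can be matched by type instead of by error-code string.
	err := fmt.Errorf("wrapped: %w", &awstypes.DataRepositoryAssociationNotFound{})
	fmt.Println(isA[*awstypes.DataRepositoryAssociationNotFound](err)) // true
}
```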
diff --git a/internal/service/fsx/data_repository_association_test.go b/internal/service/fsx/data_repository_association_test.go
index 342834c13f38..aa72df86d2e2 100644
--- a/internal/service/fsx/data_repository_association_test.go
+++ b/internal/service/fsx/data_repository_association_test.go
@@ -10,9 +10,9 @@ import (
 	"testing"
 
 	"github.com/YakDriver/regexache"
-	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
-	"github.com/aws/aws-sdk-go/service/fsx"
 	sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/hashicorp/terraform-plugin-testing/terraform"
@@ -25,7 +25,7 @@ import (
 
 func TestAccFSxDataRepositoryAssociation_basic(t *testing.T) {
 	ctx := acctest.Context(t)
-	var association fsx.DataRepositoryAssociation
+	var association awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	bucketPath := fmt.Sprintf("s3://%s", rName)
@@ -34,7 +34,7 @@ func TestAccFSxDataRepositoryAssociation_basic(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -66,7 +66,7 @@ func TestAccFSxDataRepositoryAssociation_basic(t *testing.T) {
 
 func TestAccFSxDataRepositoryAssociation_disappears(t *testing.T) {
 	ctx := acctest.Context(t)
-	var association fsx.DataRepositoryAssociation
+	var association awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -74,7 +74,7 @@ func TestAccFSxDataRepositoryAssociation_disappears(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -96,7 +96,7 @@ func TestAccFSxDataRepositoryAssociation_disappears_ParentFileSystem(t *testing.
-	var association fsx.DataRepositoryAssociation
+	var association awstypes.DataRepositoryAssociation
 	parentResourceName := "aws_fsx_lustre_file_system.test"
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
@@ -105,7 +105,7 @@ func TestAccFSxDataRepositoryAssociation_disappears_ParentFileSystem(t *testing.
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -127,7 +127,7 @@ func TestAccFSxDataRepositoryAssociation_fileSystemPathUpdated(t *testing.T) {
-	var association1, association2 fsx.DataRepositoryAssociation
+	var association1, association2 awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath1 := "/test1"
@@ -136,7 +136,7 @@ func TestAccFSxDataRepositoryAssociation_fileSystemPathUpdated(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -171,7 +171,7 @@ func TestAccFSxDataRepositoryAssociation_dataRepositoryPathUpdated(t *testing.T) {
-	var association1, association2 fsx.DataRepositoryAssociation
+	var association1, association2 awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	bucketName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
@@ -183,7 +183,7 @@ func TestAccFSxDataRepositoryAssociation_dataRepositoryPathUpdated(t *testing.T)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -219,7 +219,7 @@
 // lintignore:AT002
 func TestAccFSxDataRepositoryAssociation_importedFileChunkSize(t *testing.T) {
-	var association fsx.DataRepositoryAssociation
+	var association awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -227,7 +227,7 @@ func TestAccFSxDataRepositoryAssociation_importedFileChunkSize(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -255,7 +255,7 @@
 // lintignore:AT002
 func TestAccFSxDataRepositoryAssociation_importedFileChunkSizeUpdated(t *testing
-	var association1, association2 fsx.DataRepositoryAssociation
+	var association1, association2 awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -263,7 +263,7 @@ func TestAccFSxDataRepositoryAssociation_importedFileChunkSizeUpdated(t *testing
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -298,7 +298,7 @@ func TestAccFSxDataRepositoryAssociation_deleteDataInFilesystem(t *testing.T) {
-	var association fsx.DataRepositoryAssociation
+	var association awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -306,7 +306,7 @@ func TestAccFSxDataRepositoryAssociation_deleteDataInFilesystem(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -333,7 +333,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicy(t *testing.T) {
-	var association fsx.DataRepositoryAssociation
+	var association awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -342,7 +342,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicy(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -371,7 +371,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicyUpdate(t *testing.T)
-	var association1, association2 fsx.DataRepositoryAssociation
+	var association1, association2 awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -381,7 +381,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoExportPolicyUpdate(t *testing.T)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -418,7 +418,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicy(t *testing.T) {
-	var association fsx.DataRepositoryAssociation
+	var association awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -427,7 +427,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicy(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -456,7 +456,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicyUpdate(t *testing.T)
-	var association1, association2 fsx.DataRepositoryAssociation
+	var association1, association2 awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -466,7 +466,7 @@ func TestAccFSxDataRepositoryAssociation_s3AutoImportPolicyUpdate(t *testing.T)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -503,7 +503,7 @@ func TestAccFSxDataRepositoryAssociation_s3FullPolicy(t *testing.T) {
-	var association fsx.DataRepositoryAssociation
+	var association awstypes.DataRepositoryAssociation
 	resourceName := "aws_fsx_data_repository_association.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	fileSystemPath := "/test"
@@ -511,7 +511,7 @@ func TestAccFSxDataRepositoryAssociation_s3FullPolicy(t *testing.T) {
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck: func() {
 			acctest.PreCheck(ctx, t)
-			acctest.PreCheckPartitionHasService(t, fsx.EndpointsID)
+			acctest.PreCheckPartitionHasService(t, names.FSxEndpointID)
 			// PERSISTENT_2 deployment_type is not supported in GovCloud partition.
 			acctest.PreCheckPartitionNot(t, endpoints.AwsUsGovPartitionID)
 		},
@@ -541,14 +541,14 @@
 	})
 }
 
-func testAccCheckDataRepositoryAssociationExists(ctx context.Context, n string, v *fsx.DataRepositoryAssociation) resource.TestCheckFunc {
+func testAccCheckDataRepositoryAssociationExists(ctx context.Context, n string, v *awstypes.DataRepositoryAssociation) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[n]
 		if !ok {
 			return fmt.Errorf("Not found: %s", n)
 		}
 
-		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)
 
 		output, err := tffsx.FindDataRepositoryAssociationByID(ctx, conn, rs.Primary.ID)
 
@@ -564,7 +564,7 @@ func testAccCheckDataRepositoryAssociationExists(ctx context.Context, n string,
 
 func testAccCheckDataRepositoryAssociationDestroy(ctx context.Context) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)
 
 		for _, rs := range s.RootModule().Resources {
 			if rs.Type != "aws_fsx_data_repository_association" {
@@ -588,20 +588,20 @@ func testAccCheckDataRepositoryAssociationDestroy(ctx context.Context) resource.
 	}
 }
 
-func testAccCheckDataRepositoryAssociationNotRecreated(i, j *fsx.DataRepositoryAssociation) resource.TestCheckFunc {
+func testAccCheckDataRepositoryAssociationNotRecreated(i, j *awstypes.DataRepositoryAssociation) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		if aws.StringValue(i.AssociationId) != aws.StringValue(j.AssociationId) {
-			return fmt.Errorf("FSx Data Repository Association (%s) recreated", aws.StringValue(i.AssociationId))
+		if aws.ToString(i.AssociationId) != aws.ToString(j.AssociationId) {
+			return fmt.Errorf("FSx Data Repository Association (%s) recreated", aws.ToString(i.AssociationId))
 		}
 
 		return nil
 	}
 }
 
-func testAccCheckDataRepositoryAssociationRecreated(i, j *fsx.DataRepositoryAssociation) resource.TestCheckFunc {
+func testAccCheckDataRepositoryAssociationRecreated(i, j *awstypes.DataRepositoryAssociation) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		if aws.StringValue(i.AssociationId) == aws.StringValue(j.AssociationId) {
-			return fmt.Errorf("FSx Data Repository Association (%s) not recreated", aws.StringValue(i.AssociationId))
+		if aws.ToString(i.AssociationId) == aws.ToString(j.AssociationId) {
+			return fmt.Errorf("FSx Data Repository Association (%s) not recreated", aws.ToString(i.AssociationId))
 		}
 
 		return nil
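file_cache.go below repeats the schema-validation swap seen in data_repository_association.go: `validation.StringInSlice(fsx.FileCacheType_Values(), false)` becomes the provider's generic `enum.Validate[T]()`. The information feeding both validators is the same; in SDK v2 each generated enum type carries a `Values()` method. A hedged sketch of listing the valid strings without the provider helper:

```go
package main

import (
	"fmt"

	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

func main() {
	// SDK v1 generated package-level fsx.FileCacheType_Values() []string;
	// in v2 the enum type itself enumerates its known values, which is what
	// generic helpers like the provider's enum.Validate[T] build on.
	var valid []string
	for _, v := range awstypes.FileCacheType("").Values() {
		valid = append(valid, string(v))
	}
	fmt.Println(valid)
}
```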
"github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -127,11 +129,9 @@ func resourceFileCache() *schema.Resource { }, }, names.AttrVersion: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringInSlice(fsx.NfsVersion_Values(), false), - ), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.NfsVersion](), }, }, }, @@ -160,12 +160,10 @@ func resourceFileCache() *schema.Resource { Computed: true, }, "file_cache_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringInSlice(fsx.FileCacheType_Values(), false), - ), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.FileCacheType](), }, "file_cache_type_version": { Type: schema.TypeString, @@ -189,12 +187,10 @@ func resourceFileCache() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "deployment_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.All( - validation.StringInSlice(fsx.FileCacheLustreDeploymentType_Values(), false), - ), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.FileCacheLustreDeploymentType](), }, "log_configuration": { Type: schema.TypeSet, @@ -299,14 +295,14 @@ func resourceFileCache() *schema.Resource { func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) input := &fsx.CreateFileCacheInput{ ClientRequestToken: aws.String(id.UniqueId()), - FileCacheType: aws.String(d.Get("file_cache_type").(string)), + FileCacheType: awstypes.FileCacheType(d.Get("file_cache_type").(string)), FileCacheTypeVersion: aws.String(d.Get("file_cache_type_version").(string)), - StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), - SubnetIds: flex.ExpandStringList(d.Get(names.AttrSubnetIDs).([]interface{})), + StorageCapacity: aws.Int32(int32(d.Get("storage_capacity").(int))), + SubnetIds: flex.ExpandStringValueList(d.Get(names.AttrSubnetIDs).([]interface{})), Tags: getTagsIn(ctx), } @@ -327,16 +323,16 @@ func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta i } if v, ok := d.GetOk(names.AttrSecurityGroupIDs); ok { - input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } - output, err := conn.CreateFileCacheWithContext(ctx, input) + output, err := conn.CreateFileCache(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx for Lustre File Cache: %s", err) } - d.SetId(aws.StringValue(output.FileCache.FileCacheId)) + d.SetId(aws.ToString(output.FileCache.FileCacheId)) if _, err := waitFileCacheCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File Cache (%s) create: %s", d.Id(), err) @@ -347,7 +343,7 @@ func resourceFileCacheCreate(ctx context.Context, d *schema.ResourceData, meta i func 
resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) filecache, err := findFileCacheByID(ctx, conn, d.Id()) @@ -362,7 +358,7 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int } d.Set(names.AttrARN, filecache.ResourceARN) - dataRepositoryAssociationIDs := aws.StringValueSlice(filecache.DataRepositoryAssociationIds) + dataRepositoryAssociationIDs := filecache.DataRepositoryAssociationIds d.Set("data_repository_association_ids", dataRepositoryAssociationIDs) d.Set(names.AttrDNSName, filecache.DNSName) d.Set("file_cache_id", filecache.FileCacheId) @@ -372,10 +368,10 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int if err := d.Set("lustre_configuration", flattenFileCacheLustreConfiguration(filecache.LustreConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting lustre_configuration: %s", err) } - d.Set("network_interface_ids", aws.StringValueSlice(filecache.NetworkInterfaceIds)) + d.Set("network_interface_ids", filecache.NetworkInterfaceIds) d.Set(names.AttrOwnerID, filecache.OwnerId) d.Set("storage_capacity", filecache.StorageCapacity) - d.Set(names.AttrSubnetIDs, aws.StringValueSlice(filecache.SubnetIds)) + d.Set(names.AttrSubnetIDs, filecache.SubnetIds) d.Set(names.AttrVPCID, filecache.VpcId) dataRepositoryAssociations, err := findDataRepositoryAssociationsByIDs(ctx, conn, dataRepositoryAssociationIDs) @@ -395,20 +391,20 @@ func resourceFileCacheRead(ctx context.Context, d *schema.ResourceData, meta int func resourceFileCacheUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &fsx.UpdateFileCacheInput{ ClientRequestToken: aws.String(id.UniqueId()), FileCacheId: aws.String(d.Id()), - LustreConfiguration: &fsx.UpdateFileCacheLustreConfiguration{}, + LustreConfiguration: &awstypes.UpdateFileCacheLustreConfiguration{}, } if d.HasChanges("lustre_configuration") { input.LustreConfiguration = expandUpdateFileCacheLustreConfiguration(d.Get("lustre_configuration").([]interface{})) } - _, err := conn.UpdateFileCacheWithContext(ctx, input) + _, err := conn.UpdateFileCache(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for Lustre File Cache (%s): %s", d.Id(), err) @@ -424,15 +420,15 @@ func resourceFileCacheUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceFileCacheDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) log.Printf("[INFO] Deleting FSx FileCache: %s", d.Id()) - _, err := conn.DeleteFileCacheWithContext(ctx, &fsx.DeleteFileCacheInput{ + _, err := conn.DeleteFileCache(ctx, &fsx.DeleteFileCacheInput{ ClientRequestToken: aws.String(id.UniqueId()), FileCacheId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { + if errs.IsA[*awstypes.FileCacheNotFound](err) { return diags } @@ -447,56 +443,53 @@ func resourceFileCacheDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func findFileCacheByID(ctx context.Context, conn *fsx.FSx, id string) 
(*fsx.FileCache, error) { +func findFileCacheByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.FileCache, error) { input := &fsx.DescribeFileCachesInput{ - FileCacheIds: aws.StringSlice([]string{id}), + FileCacheIds: []string{id}, } - return findFileCache(ctx, conn, input, tfslices.PredicateTrue[*fsx.FileCache]()) + return findFileCache(ctx, conn, input, tfslices.PredicateTrue[*awstypes.FileCache]()) } -func findFileCache(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeFileCachesInput, filter tfslices.Predicate[*fsx.FileCache]) (*fsx.FileCache, error) { +func findFileCache(ctx context.Context, conn *fsx.Client, input *fsx.DescribeFileCachesInput, filter tfslices.Predicate[*awstypes.FileCache]) (*awstypes.FileCache, error) { output, err := findFileCaches(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findFileCaches(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeFileCachesInput, filter tfslices.Predicate[*fsx.FileCache]) ([]*fsx.FileCache, error) { - var output []*fsx.FileCache +func findFileCaches(ctx context.Context, conn *fsx.Client, input *fsx.DescribeFileCachesInput, filter tfslices.Predicate[*awstypes.FileCache]) ([]awstypes.FileCache, error) { + var output []awstypes.FileCache - err := conn.DescribeFileCachesPagesWithContext(ctx, input, func(page *fsx.DescribeFileCachesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := fsx.NewDescribeFileCachesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.FileCaches { - if v != nil && filter(v) { - output = append(output, v) + if errs.IsA[*awstypes.FileCacheNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileCacheNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.FileCaches { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func statusFileCache(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { +func statusFileCache(ctx context.Context, conn *fsx.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findFileCacheByID(ctx, conn, id) @@ -508,14 +501,14 @@ func statusFileCache(ctx context.Context, conn *fsx.FSx, id string) retry.StateR return nil, "", err } - return output, aws.StringValue(output.Lifecycle), nil + return output, string(output.Lifecycle), nil } } -func waitFileCacheCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { +func waitFileCacheCreated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.FileCache, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.FileCacheLifecycleCreating}, - Target: []string{fsx.FileCacheLifecycleAvailable}, + Pending: enum.Slice(awstypes.FileCacheLifecycleCreating), + Target: enum.Slice(awstypes.FileCacheLifecycleAvailable), Refresh: statusFileCache(ctx, conn, id), Timeout: timeout, Delay: 30 * time.Second, @@ -523,9 +516,9 @@ func waitFileCacheCreated(ctx context.Context, conn *fsx.FSx, id string, timeout outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.FileCache); ok { 
- if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + if output, ok := outputRaw.(*awstypes.FileCache); ok { + if status, details := output.Lifecycle, output.FailureDetails; status == awstypes.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(output.FailureDetails.Message))) } return output, err @@ -533,10 +526,10 @@ func waitFileCacheCreated(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func waitFileCacheUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { +func waitFileCacheUpdated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.FileCache, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.FileCacheLifecycleUpdating}, - Target: []string{fsx.FileCacheLifecycleAvailable}, + Pending: enum.Slice(awstypes.FileCacheLifecycleUpdating), + Target: enum.Slice(awstypes.FileCacheLifecycleAvailable), Refresh: statusFileCache(ctx, conn, id), Timeout: timeout, Delay: 30 * time.Second, @@ -544,9 +537,9 @@ func waitFileCacheUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.FileCache); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + if output, ok := outputRaw.(*awstypes.FileCache); ok { + if status, details := output.Lifecycle, output.FailureDetails; status == awstypes.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(output.FailureDetails.Message))) } return output, err @@ -555,9 +548,9 @@ func waitFileCacheUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileCache, error) { +func waitFileCacheDeleted(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.FileCache, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.FileCacheLifecycleAvailable, fsx.FileCacheLifecycleDeleting}, + Pending: enum.Slice(awstypes.FileCacheLifecycleAvailable, awstypes.FileCacheLifecycleDeleting), Target: []string{}, Refresh: statusFileCache(ctx, conn, id), Timeout: timeout, @@ -566,9 +559,9 @@ func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.FileCache); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileCacheLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) + if output, ok := outputRaw.(*awstypes.FileCache); ok { + if status, details := output.Lifecycle, output.FailureDetails; status == awstypes.FileCacheLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(output.FailureDetails.Message))) } return output, err @@ -577,15 +570,15 @@ func waitFileCacheDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func findDataRepositoryAssociationsByIDs(ctx 
context.Context, conn *fsx.FSx, ids []string) ([]*fsx.DataRepositoryAssociation, error) { +func findDataRepositoryAssociationsByIDs(ctx context.Context, conn *fsx.Client, ids []string) ([]awstypes.DataRepositoryAssociation, error) { input := &fsx.DescribeDataRepositoryAssociationsInput{ - AssociationIds: aws.StringSlice(ids), + AssociationIds: ids, } - return findDataRepositoryAssociations(ctx, conn, input, tfslices.PredicateTrue[*fsx.DataRepositoryAssociation]()) + return findDataRepositoryAssociations(ctx, conn, input, tfslices.PredicateTrue[*awstypes.DataRepositoryAssociation]()) } -func flattenDataRepositoryAssociations(ctx context.Context, dataRepositoryAssociations []*fsx.DataRepositoryAssociation, defaultTagsConfig *tftags.DefaultConfig, ignoreTagsConfig *tftags.IgnoreConfig) []interface{} { +func flattenDataRepositoryAssociations(ctx context.Context, dataRepositoryAssociations []awstypes.DataRepositoryAssociation, defaultTagsConfig *tftags.DefaultConfig, ignoreTagsConfig *tftags.IgnoreConfig) []interface{} { if len(dataRepositoryAssociations) == 0 { return nil } @@ -598,7 +591,7 @@ func flattenDataRepositoryAssociations(ctx context.Context, dataRepositoryAssoci values := map[string]interface{}{ names.AttrAssociationID: dataRepositoryAssociation.AssociationId, "data_repository_path": dataRepositoryAssociation.DataRepositoryPath, - "data_repository_subdirectories": aws.StringValueSlice(dataRepositoryAssociation.DataRepositorySubdirectories), + "data_repository_subdirectories": dataRepositoryAssociation.DataRepositorySubdirectories, "file_cache_id": dataRepositoryAssociation.FileCacheId, "file_cache_path": dataRepositoryAssociation.FileCachePath, "imported_file_chunk_size": dataRepositoryAssociation.ImportedFileChunkSize, @@ -611,27 +604,26 @@ func flattenDataRepositoryAssociations(ctx context.Context, dataRepositoryAssoci return flattenedDataRepositoryAssociations } -func flattenNFSDataRepositoryConfiguration(nfsDataRepositoryConfiguration *fsx.NFSDataRepositoryConfiguration) []map[string]interface{} { +func flattenNFSDataRepositoryConfiguration(nfsDataRepositoryConfiguration *awstypes.NFSDataRepositoryConfiguration) []map[string]interface{} { if nfsDataRepositoryConfiguration == nil { return []map[string]interface{}{} } values := map[string]interface{}{ - "dns_ips": aws.StringValueSlice(nfsDataRepositoryConfiguration.DnsIps), - names.AttrVersion: aws.StringValue(nfsDataRepositoryConfiguration.Version), + "dns_ips": nfsDataRepositoryConfiguration.DnsIps, + names.AttrVersion: string(nfsDataRepositoryConfiguration.Version), } return []map[string]interface{}{values} } -func flattenFileCacheLustreConfiguration(fileCacheLustreConfiguration *fsx.FileCacheLustreConfiguration) []interface{} { +func flattenFileCacheLustreConfiguration(fileCacheLustreConfiguration *awstypes.FileCacheLustreConfiguration) []interface{} { if fileCacheLustreConfiguration == nil { return []interface{}{} } values := make(map[string]interface{}) - if fileCacheLustreConfiguration.DeploymentType != nil { - values["deployment_type"] = aws.StringValue(fileCacheLustreConfiguration.DeploymentType) - } + values["deployment_type"] = string(fileCacheLustreConfiguration.DeploymentType) + if fileCacheLustreConfiguration.LogConfiguration != nil { values["log_configuration"] = flattenLustreLogConfiguration(fileCacheLustreConfiguration.LogConfiguration) } @@ -639,33 +631,33 @@ func flattenFileCacheLustreConfiguration(fileCacheLustreConfiguration *fsx.FileC values["metadata_configuration"] = 
flattenFileCacheLustreMetadataConfiguration(fileCacheLustreConfiguration.MetadataConfiguration) } if fileCacheLustreConfiguration.MountName != nil { - values["mount_name"] = aws.StringValue(fileCacheLustreConfiguration.MountName) + values["mount_name"] = aws.ToString(fileCacheLustreConfiguration.MountName) } if fileCacheLustreConfiguration.PerUnitStorageThroughput != nil { - values["per_unit_storage_throughput"] = aws.Int64Value(fileCacheLustreConfiguration.PerUnitStorageThroughput) + values["per_unit_storage_throughput"] = aws.ToInt32(fileCacheLustreConfiguration.PerUnitStorageThroughput) } if fileCacheLustreConfiguration.WeeklyMaintenanceStartTime != nil { - values["weekly_maintenance_start_time"] = aws.StringValue(fileCacheLustreConfiguration.WeeklyMaintenanceStartTime) + values["weekly_maintenance_start_time"] = aws.ToString(fileCacheLustreConfiguration.WeeklyMaintenanceStartTime) } return []interface{}{values} } -func flattenFileCacheLustreMetadataConfiguration(fileCacheLustreMetadataConfiguration *fsx.FileCacheLustreMetadataConfiguration) []interface{} { +func flattenFileCacheLustreMetadataConfiguration(fileCacheLustreMetadataConfiguration *awstypes.FileCacheLustreMetadataConfiguration) []interface{} { values := make(map[string]interface{}) if fileCacheLustreMetadataConfiguration.StorageCapacity != nil { - values["storage_capacity"] = aws.Int64Value(fileCacheLustreMetadataConfiguration.StorageCapacity) + values["storage_capacity"] = aws.ToInt32(fileCacheLustreMetadataConfiguration.StorageCapacity) } return []interface{}{values} } -func expandDataRepositoryAssociations(l []interface{}) []*fsx.FileCacheDataRepositoryAssociation { +func expandDataRepositoryAssociations(l []interface{}) []awstypes.FileCacheDataRepositoryAssociation { if len(l) == 0 { return nil } - var dataRepositoryAssociations []*fsx.FileCacheDataRepositoryAssociation + var dataRepositoryAssociations []awstypes.FileCacheDataRepositoryAssociation for _, tfMapRaw := range l { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -673,13 +665,13 @@ func expandDataRepositoryAssociations(l []interface{}) []*fsx.FileCacheDataRepos if !ok { continue } - req := &fsx.FileCacheDataRepositoryAssociation{} + req := awstypes.FileCacheDataRepositoryAssociation{} if v, ok := tfMap["data_repository_path"].(string); ok { req.DataRepositoryPath = aws.String(v) } if v, ok := tfMap["data_repository_subdirectories"]; ok { - req.DataRepositorySubdirectories = flex.ExpandStringSet(v.(*schema.Set)) + req.DataRepositorySubdirectories = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := tfMap["file_cache_path"].(string); ok { req.FileCachePath = aws.String(v) @@ -693,30 +685,30 @@ func expandDataRepositoryAssociations(l []interface{}) []*fsx.FileCacheDataRepos return dataRepositoryAssociations } -func expandFileCacheNFSConfiguration(l []interface{}) *fsx.FileCacheNFSConfiguration { +func expandFileCacheNFSConfiguration(l []interface{}) *awstypes.FileCacheNFSConfiguration { if len(l) == 0 || l[0] == nil { return nil } data := l[0].(map[string]interface{}) - req := &fsx.FileCacheNFSConfiguration{} + req := &awstypes.FileCacheNFSConfiguration{} if v, ok := data["dns_ips"]; ok { - req.DnsIps = flex.ExpandStringSet(v.(*schema.Set)) + req.DnsIps = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := data[names.AttrVersion].(string); ok { - req.Version = aws.String(v) + req.Version = awstypes.NfsVersion(v) } return req } -func expandUpdateFileCacheLustreConfiguration(l []interface{}) *fsx.UpdateFileCacheLustreConfiguration { +func 
expandUpdateFileCacheLustreConfiguration(l []interface{}) *awstypes.UpdateFileCacheLustreConfiguration { if len(l) == 0 || l[0] == nil { return nil } data := l[0].(map[string]interface{}) - req := &fsx.UpdateFileCacheLustreConfiguration{} + req := &awstypes.UpdateFileCacheLustreConfiguration{} if v, ok := data["weekly_maintenance_start_time"].(string); ok { req.WeeklyMaintenanceStartTime = aws.String(v) @@ -725,21 +717,21 @@ func expandUpdateFileCacheLustreConfiguration(l []interface{}) *fsx.UpdateFileCa return req } -func expandCreateFileCacheLustreConfiguration(l []interface{}) *fsx.CreateFileCacheLustreConfiguration { +func expandCreateFileCacheLustreConfiguration(l []interface{}) *awstypes.CreateFileCacheLustreConfiguration { if len(l) == 0 || l[0] == nil { return nil } data := l[0].(map[string]interface{}) - req := &fsx.CreateFileCacheLustreConfiguration{} + req := &awstypes.CreateFileCacheLustreConfiguration{} if v, ok := data["deployment_type"].(string); ok { - req.DeploymentType = aws.String(v) + req.DeploymentType = awstypes.FileCacheLustreDeploymentType(v) } if v, ok := data["metadata_configuration"]; ok && len(v.(*schema.Set).List()) > 0 { req.MetadataConfiguration = expandFileCacheLustreMetadataConfiguration(v.(*schema.Set).List()) } if v, ok := data["per_unit_storage_throughput"].(int); ok { - req.PerUnitStorageThroughput = aws.Int64(int64(v)) + req.PerUnitStorageThroughput = aws.Int32(int32(v)) } if v, ok := data["weekly_maintenance_start_time"].(string); ok { req.WeeklyMaintenanceStartTime = aws.String(v) @@ -748,15 +740,15 @@ func expandCreateFileCacheLustreConfiguration(l []interface{}) *fsx.CreateFileCa return req } -func expandFileCacheLustreMetadataConfiguration(l []interface{}) *fsx.FileCacheLustreMetadataConfiguration { +func expandFileCacheLustreMetadataConfiguration(l []interface{}) *awstypes.FileCacheLustreMetadataConfiguration { if len(l) == 0 || l[0] == nil { return nil } data := l[0].(map[string]interface{}) - req := &fsx.FileCacheLustreMetadataConfiguration{} + req := &awstypes.FileCacheLustreMetadataConfiguration{} if v, ok := data["storage_capacity"].(int); ok { - req.StorageCapacity = aws.Int64(int64(v)) + req.StorageCapacity = aws.Int32(int32(v)) } return req } diff --git a/internal/service/fsx/file_cache_test.go b/internal/service/fsx/file_cache_test.go index 515aec62e900..fa3e04d1d802 100644 --- a/internal/service/fsx/file_cache_test.go +++ b/internal/service/fsx/file_cache_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -47,14 +47,14 @@ func testAccFileCache_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache fsx.FileCache + var filecache awstypes.FileCache resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -93,14 +93,14 @@ func testAccFileCache_disappears(t 
*testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache fsx.FileCache + var filecache awstypes.FileCache resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -126,12 +126,12 @@ func testAccFileCache_copyTagsToDataRepositoryAssociations(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache1 fsx.FileCache + var filecache1 awstypes.FileCache resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFileCacheDestroy(ctx), @@ -160,12 +160,12 @@ func testAccFileCache_dataRepositoryAssociation_multiple(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache fsx.FileCache + var filecache awstypes.FileCache resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFileCacheDestroy(ctx), @@ -193,12 +193,12 @@ func testAccFileCache_dataRepositoryAssociation_nfs(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache fsx.FileCache + var filecache awstypes.FileCache resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFileCacheDestroy(ctx), @@ -230,12 +230,12 @@ func testAccFileCache_dataRepositoryAssociation_s3(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache fsx.FileCache + var filecache awstypes.FileCache resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFileCacheDestroy(ctx), @@ -265,14 +265,14 @@ func testAccFileCache_kmsKeyID(t *testing.T) { 
t.Skip("skipping long-running test in short mode") } - var filecache1, filecache2 fsx.FileCache + var filecache1, filecache2 awstypes.FileCache kmsKeyResourceName1 := "aws_kms_key.test1" kmsKeyResourceName2 := "aws_kms_key.test2" resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFileCacheDestroy(ctx), @@ -314,12 +314,12 @@ func testAccFileCache_securityGroupID(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache1 fsx.FileCache + var filecache1 awstypes.FileCache resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFileCacheDestroy(ctx), @@ -347,12 +347,12 @@ func testAccFileCache_tags(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var filecache1, filecache2 fsx.FileCache + var filecache1, filecache2 awstypes.FileCache resourceName := "aws_fsx_file_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFileCacheDestroy(ctx), @@ -393,7 +393,7 @@ func testAccFileCache_tags(t *testing.T) { func testAccCheckFileCacheDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_fsx_file_cache" { @@ -417,14 +417,14 @@ func testAccCheckFileCacheDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckFileCacheExists(ctx context.Context, n string, v *fsx.FileCache) resource.TestCheckFunc { +func testAccCheckFileCacheExists(ctx context.Context, n string, v *awstypes.FileCache) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) output, err := tffsx.FindFileCacheByID(ctx, conn, rs.Primary.ID) @@ -438,20 +438,20 @@ func testAccCheckFileCacheExists(ctx context.Context, n string, v *fsx.FileCache } } -func testAccCheckFileCacheNotRecreated(i, j *fsx.FileCache) resource.TestCheckFunc { +func testAccCheckFileCacheNotRecreated(i, j *awstypes.FileCache) resource.TestCheckFunc { return func(s 
*terraform.State) error { - if aws.StringValue(i.FileCacheId) != aws.StringValue(j.FileCacheId) { - return fmt.Errorf("FSx File System (%s) recreated", aws.StringValue(i.FileCacheId)) + if aws.ToString(i.FileCacheId) != aws.ToString(j.FileCacheId) { + return fmt.Errorf("FSx File System (%s) recreated", aws.ToString(i.FileCacheId)) } return nil } } -func testAccCheckFileCacheRecreated(i, j *fsx.FileCache) resource.TestCheckFunc { +func testAccCheckFileCacheRecreated(i, j *awstypes.FileCache) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileCacheId) == aws.StringValue(j.FileCacheId) { - return fmt.Errorf("FSx File System (%s) not recreated", aws.StringValue(i.FileCacheId)) + if aws.ToString(i.FileCacheId) == aws.ToString(j.FileCacheId) { + return fmt.Errorf("FSx File System (%s) not recreated", aws.ToString(i.FileCacheId)) } return nil diff --git a/internal/service/fsx/generate.go b/internal/service/fsx/generate.go index f79c17d8d924..33217bbbc242 100644 --- a/internal/service/fsx/generate.go +++ b/internal/service/fsx/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsInIDElem=ResourceARN -ServiceTagsSlice -TagInIDElem=ResourceARN -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ListTagsInIDElem=ResourceARN -ServiceTagsSlice -TagInIDElem=ResourceARN -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index f530e9cff506..d68d61fb621d 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -12,10 +12,10 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/fsx" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -23,6 +23,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -61,10 +63,10 @@ func resourceLustreFileSystem() *schema.Resource { Computed: true, }, "auto_import_policy": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(fsx.AutoImportPolicyType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.AutoImportPolicyType](), }, "automatic_backup_retention_days": { Type: schema.TypeInt, @@ -93,27 +95,27 @@ func resourceLustreFileSystem() *schema.Resource { ), }, 
"data_compression_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(fsx.DataCompressionType_Values(), false), - Default: fsx.DataCompressionTypeNone, + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.DataCompressionType](), + Default: awstypes.DataCompressionTypeNone, }, "deployment_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: fsx.LustreDeploymentTypeScratch1, - ValidateFunc: validation.StringInSlice(fsx.LustreDeploymentType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.LustreDeploymentTypeScratch1, + ValidateDiagFunc: enum.Validate[awstypes.LustreDeploymentType](), }, names.AttrDNSName: { Type: schema.TypeString, Computed: true, }, "drive_cache_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(fsx.DriveCacheType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.DriveCacheType](), }, "export_path": { Type: schema.TypeString, @@ -177,9 +179,9 @@ func resourceLustreFileSystem() *schema.Resource { }, }, "level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(fsx.LustreAccessAuditLogLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.LustreAccessAuditLogLevel](), }, }, }, @@ -195,7 +197,7 @@ func resourceLustreFileSystem() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateDiagFunc: validation.ToDiagFunc(validation.StringInSlice(fsx.MetadataConfigurationMode_Values(), false)), + ValidateDiagFunc: enum.Validate[awstypes.MetadataConfigurationMode](), }, names.AttrIOPS: { Type: schema.TypeInt, @@ -276,11 +278,11 @@ func resourceLustreFileSystem() *schema.Resource { ValidateFunc: validation.IntAtLeast(1200), }, names.AttrStorageType: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: fsx.StorageTypeSsd, - ValidateFunc: validation.StringInSlice(fsx.StorageType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.StorageTypeSsd, + ValidateDiagFunc: enum.Validate[awstypes.StorageType](), }, names.AttrSubnetIDs: { Type: schema.TypeList, @@ -319,7 +321,7 @@ func resourceLustreFileSystemStorageCapacityCustomizeDiff(_ context.Context, d * // we want to force a new resource if the new storage capacity is less than the old one if d.HasChange("storage_capacity") { o, n := d.GetChange("storage_capacity") - if n.(int) < o.(int) || d.Get("deployment_type").(string) == fsx.LustreDeploymentTypeScratch1 { + if n.(int) < o.(int) || d.Get("deployment_type").(string) == string(awstypes.LustreDeploymentTypeScratch1) { if err := d.ForceNew("storage_capacity"); err != nil { return err } @@ -334,8 +336,8 @@ func resourceLustreFileSystemMetadataConfigCustomizeDiff(_ context.Context, d *s if v, ok := d.GetOk("metadata_configuration"); ok { if len(v.([]any)) > 0 { deploymentType := d.Get("deployment_type").(string) - if deploymentType != fsx.LustreDeploymentTypePersistent2 { - return fmt.Errorf("metadata_configuration can only be set when deployment type is " + fsx.LustreDeploymentTypePersistent2) + if deploymentType != string(awstypes.LustreDeploymentTypePersistent2) { + return fmt.Errorf("metadata_configuration can only be set when deployment type is " + string(awstypes.LustreDeploymentTypePersistent2)) } } } 
@@ -343,7 +345,7 @@ func resourceLustreFileSystemMetadataConfigCustomizeDiff(_ context.Context, d *s // we want to force a new resource if the new Iops is less than the old one if d.HasChange("metadata_configuration") { if v, ok := d.GetOk("metadata_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - if mode := d.Get("metadata_configuration.0.mode"); mode == fsx.MetadataConfigurationModeUserProvisioned { + if mode := d.Get("metadata_configuration.0.mode"); mode == string(awstypes.MetadataConfigurationModeUserProvisioned) { o, n := d.GetChange("metadata_configuration") oldV := o.([]interface{}) @@ -376,37 +378,37 @@ func resourceLustreFileSystemMetadataConfigCustomizeDiff(_ context.Context, d *s func resourceLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) inputC := &fsx.CreateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), - FileSystemType: aws.String(fsx.FileSystemTypeLustre), - LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{ - DeploymentType: aws.String(d.Get("deployment_type").(string)), + FileSystemType: awstypes.FileSystemTypeLustre, + LustreConfiguration: &awstypes.CreateFileSystemLustreConfiguration{ + DeploymentType: awstypes.LustreDeploymentType(d.Get("deployment_type").(string)), }, - StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), - StorageType: aws.String(d.Get(names.AttrStorageType).(string)), - SubnetIds: flex.ExpandStringList(d.Get(names.AttrSubnetIDs).([]interface{})), + StorageCapacity: aws.Int32(int32(d.Get("storage_capacity").(int))), + StorageType: awstypes.StorageType(d.Get(names.AttrStorageType).(string)), + SubnetIds: flex.ExpandStringValueList(d.Get(names.AttrSubnetIDs).([]interface{})), Tags: getTagsIn(ctx), } inputB := &fsx.CreateFileSystemFromBackupInput{ ClientRequestToken: aws.String(id.UniqueId()), - LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{ - DeploymentType: aws.String(d.Get("deployment_type").(string)), + LustreConfiguration: &awstypes.CreateFileSystemLustreConfiguration{ + DeploymentType: awstypes.LustreDeploymentType(d.Get("deployment_type").(string)), }, - StorageType: aws.String(d.Get(names.AttrStorageType).(string)), - SubnetIds: flex.ExpandStringList(d.Get(names.AttrSubnetIDs).([]interface{})), + StorageType: awstypes.StorageType(d.Get(names.AttrStorageType).(string)), + SubnetIds: flex.ExpandStringValueList(d.Get(names.AttrSubnetIDs).([]interface{})), Tags: getTagsIn(ctx), } if v, ok := d.GetOk("auto_import_policy"); ok { - inputC.LustreConfiguration.AutoImportPolicy = aws.String(v.(string)) - inputB.LustreConfiguration.AutoImportPolicy = aws.String(v.(string)) + inputC.LustreConfiguration.AutoImportPolicy = awstypes.AutoImportPolicyType(v.(string)) + inputB.LustreConfiguration.AutoImportPolicy = awstypes.AutoImportPolicyType(v.(string)) } if v, ok := d.GetOk("automatic_backup_retention_days"); ok { - inputC.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int))) - inputB.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int))) + inputC.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int32(int32(v.(int))) + inputB.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("copy_tags_to_backups"); ok { @@ -420,13 +422,13 @@ func resourceLustreFileSystemCreate(ctx context.Context, d
*schema.ResourceData, } if v, ok := d.GetOk("data_compression_type"); ok { - inputC.LustreConfiguration.DataCompressionType = aws.String(v.(string)) - inputB.LustreConfiguration.DataCompressionType = aws.String(v.(string)) + inputC.LustreConfiguration.DataCompressionType = awstypes.DataCompressionType(v.(string)) + inputB.LustreConfiguration.DataCompressionType = awstypes.DataCompressionType(v.(string)) } if v, ok := d.GetOk("drive_cache_type"); ok { - inputC.LustreConfiguration.DriveCacheType = aws.String(v.(string)) - inputB.LustreConfiguration.DriveCacheType = aws.String(v.(string)) + inputC.LustreConfiguration.DriveCacheType = awstypes.DriveCacheType(v.(string)) + inputB.LustreConfiguration.DriveCacheType = awstypes.DriveCacheType(v.(string)) } if v, ok := d.GetOk("export_path"); ok { @@ -445,8 +447,8 @@ func resourceLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("imported_file_chunk_size"); ok { - inputC.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int))) - inputB.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int))) + inputC.LustreConfiguration.ImportedFileChunkSize = aws.Int32(int32(v.(int))) + inputB.LustreConfiguration.ImportedFileChunkSize = aws.Int32(int32(v.(int))) } // Applicable only for TypePersistent1 and TypePersistent2. @@ -466,8 +468,8 @@ func resourceLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("per_unit_storage_throughput"); ok { - inputC.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int))) - inputB.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int))) + inputC.LustreConfiguration.PerUnitStorageThroughput = aws.Int32(int32(v.(int))) + inputB.LustreConfiguration.PerUnitStorageThroughput = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("root_squash_configuration"); ok && len(v.([]interface{})) > 0 { @@ -476,8 +478,8 @@ func resourceLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk(names.AttrSecurityGroupIDs); ok { - inputC.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) - inputB.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + inputC.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) + inputB.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { @@ -489,21 +491,21 @@ func resourceLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, backupID := v.(string) inputB.BackupId = aws.String(backupID) - output, err := conn.CreateFileSystemFromBackupWithContext(ctx, inputB) + output, err := conn.CreateFileSystemFromBackup(ctx, inputB) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx for Lustre File System from backup (%s): %s", backupID, err) } - d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) + d.SetId(aws.ToString(output.FileSystem.FileSystemId)) } else { - output, err := conn.CreateFileSystemWithContext(ctx, inputC) + output, err := conn.CreateFileSystem(ctx, inputC) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx for Lustre File System: %s", err) } - d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) + d.SetId(aws.ToString(output.FileSystem.FileSystemId)) } if _, err := waitFileSystemCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -515,7 +517,7 @@ func resourceLustreFileSystemCreate(ctx context.Context, d *schema.ResourceData, func resourceLustreFileSystemRead(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) filesystem, err := findLustreFileSystemByID(ctx, conn, d.Id()) @@ -532,7 +534,7 @@ func resourceLustreFileSystemRead(ctx context.Context, d *schema.ResourceData, m lustreConfig := filesystem.LustreConfiguration if lustreConfig.DataRepositoryConfiguration == nil { // Initialize an empty structure to simplify d.Set() handling. - lustreConfig.DataRepositoryConfiguration = &fsx.DataRepositoryConfiguration{} + lustreConfig.DataRepositoryConfiguration = &awstypes.DataRepositoryConfiguration{} } d.Set(names.AttrARN, filesystem.ResourceARN) @@ -556,7 +558,7 @@ func resourceLustreFileSystemRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "setting metadata_configuration: %s", err) } d.Set("mount_name", lustreConfig.MountName) - d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)) + d.Set("network_interface_ids", filesystem.NetworkInterfaceIds) d.Set(names.AttrOwnerID, filesystem.OwnerId) d.Set("per_unit_storage_throughput", lustreConfig.PerUnitStorageThroughput) if err := d.Set("root_squash_configuration", flattenLustreRootSquashConfiguration(lustreConfig.RootSquashConfiguration)); err != nil { @@ -564,7 +566,7 @@ func resourceLustreFileSystemRead(ctx context.Context, d *schema.ResourceData, m } d.Set("storage_capacity", filesystem.StorageCapacity) d.Set(names.AttrStorageType, filesystem.StorageType) - d.Set(names.AttrSubnetIDs, aws.StringValueSlice(filesystem.SubnetIds)) + d.Set(names.AttrSubnetIDs, filesystem.SubnetIds) d.Set(names.AttrVPCID, filesystem.VpcId) d.Set("weekly_maintenance_start_time", lustreConfig.WeeklyMaintenanceStartTime) @@ -575,7 +577,7 @@ func resourceLustreFileSystemRead(ctx context.Context, d *schema.ResourceData, m func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChangesExcept( "final_backup_tags", @@ -586,15 +588,15 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), - LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{}, + LustreConfiguration: &awstypes.UpdateFileSystemLustreConfiguration{}, } if d.HasChange("auto_import_policy") { - input.LustreConfiguration.AutoImportPolicy = aws.String(d.Get("auto_import_policy").(string)) + input.LustreConfiguration.AutoImportPolicy = awstypes.AutoImportPolicyType(d.Get("auto_import_policy").(string)) } if d.HasChange("automatic_backup_retention_days") { - input.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))) + input.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))) } if d.HasChange("daily_automatic_backup_start_time") { @@ -602,7 +604,7 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("data_compression_type") { - input.LustreConfiguration.DataCompressionType = aws.String(d.Get("data_compression_type").(string)) + input.LustreConfiguration.DataCompressionType = awstypes.DataCompressionType(d.Get("data_compression_type").(string)) } if 
d.HasChange("log_configuration") { @@ -614,7 +616,7 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("per_unit_storage_throughput") { - input.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(d.Get("per_unit_storage_throughput").(int))) + input.LustreConfiguration.PerUnitStorageThroughput = aws.Int32(int32(d.Get("per_unit_storage_throughput").(int))) } if d.HasChange("root_squash_configuration") { @@ -622,7 +624,7 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("storage_capacity") { - input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int))) + input.StorageCapacity = aws.Int32(int32(d.Get("storage_capacity").(int))) } if d.HasChange("weekly_maintenance_start_time") { @@ -630,7 +632,7 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, } startTime := time.Now() - _, err := conn.UpdateFileSystemWithContext(ctx, input) + _, err := conn.UpdateFileSystem(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSX for Lustre File System (%s): %s", d.Id(), err) @@ -640,8 +642,8 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) update: %s", d.Id(), err) } - if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Lustre File System (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, err) } } @@ -650,7 +652,7 @@ func resourceLustreFileSystemUpdate(ctx context.Context, d *schema.ResourceData, func resourceLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) input := &fsx.DeleteFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), @@ -660,7 +662,7 @@ func resourceLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, // Final backup during delete is not supported on file systems using the Scratch deployment type // LustreConfiguration cannot be supplied at all, even when empty, in this scenario if v, ok := d.GetOk("deployment_type"); ok && !strings.HasPrefix(v.(string), "SCRATCH_") { - lustreConfig := &fsx.DeleteFileSystemLustreConfiguration{ + lustreConfig := &awstypes.DeleteFileSystemLustreConfiguration{ SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), } @@ -672,9 +674,9 @@ func resourceLustreFileSystemDelete(ctx context.Context, d *schema.ResourceData, } log.Printf("[DEBUG] Deleting FSx for Lustre File System: %s", d.Id()) - _, err := conn.DeleteFileSystemWithContext(ctx, input) + _, err := conn.DeleteFileSystem(ctx, input) - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { + if errs.IsA[*awstypes.FileSystemNotFound](err) { return diags } @@ -689,143 +691,8 @@ func resourceLustreFileSystemDelete(ctx 
context.Context, d *schema.ResourceData, return diags } -func expandLustreRootSquashConfiguration(l []interface{}) *fsx.LustreRootSquashConfiguration { - if len(l) == 0 || l[0] == nil { - return nil - } - - data := l[0].(map[string]interface{}) - req := &fsx.LustreRootSquashConfiguration{} - - if v, ok := data["root_squash"].(string); ok && v != "" { - req.RootSquash = aws.String(v) - } - - if v, ok := data["no_squash_nids"].(*schema.Set); ok && v.Len() > 0 { - req.NoSquashNids = flex.ExpandStringSet(v) - } - - return req -} - -func flattenLustreRootSquashConfiguration(adopts *fsx.LustreRootSquashConfiguration) []map[string]interface{} { - if adopts == nil { - return []map[string]interface{}{} - } - - m := map[string]interface{}{} - - if adopts.RootSquash != nil { - m["root_squash"] = aws.StringValue(adopts.RootSquash) - } - - if adopts.NoSquashNids != nil { - m["no_squash_nids"] = flex.FlattenStringSet(adopts.NoSquashNids) - } - - return []map[string]interface{}{m} -} - -func expandLustreLogCreateConfiguration(l []interface{}) *fsx.LustreLogCreateConfiguration { - if len(l) == 0 || l[0] == nil { - return nil - } - - data := l[0].(map[string]interface{}) - req := &fsx.LustreLogCreateConfiguration{ - Level: aws.String(data["level"].(string)), - } - - if v, ok := data[names.AttrDestination].(string); ok && v != "" { - req.Destination = aws.String(logStateFunc(v)) - } - - return req -} - -func flattenLustreLogConfiguration(adopts *fsx.LustreLogConfiguration) []map[string]interface{} { - if adopts == nil { - return []map[string]interface{}{} - } - - m := map[string]interface{}{ - "level": aws.StringValue(adopts.Level), - } - - if adopts.Destination != nil { - m[names.AttrDestination] = aws.StringValue(adopts.Destination) - } - - return []map[string]interface{}{m} -} - -func expandLustreMetadataCreateConfiguration(l []interface{}) *fsx.CreateFileSystemLustreMetadataConfiguration { - if len(l) == 0 || l[0] == nil { - return nil - } - - data := l[0].(map[string]interface{}) - req := &fsx.CreateFileSystemLustreMetadataConfiguration{ - Mode: aws.String(data[names.AttrMode].(string)), - } - - if v, ok := data[names.AttrIOPS].(int); ok && v != 0 { - req.Iops = aws.Int64(int64(v)) - } - - return req -} - -func expandLustreMetadataUpdateConfiguration(l []interface{}) *fsx.UpdateFileSystemLustreMetadataConfiguration { - if len(l) == 0 || l[0] == nil { - return nil - } - - data := l[0].(map[string]interface{}) - req := &fsx.UpdateFileSystemLustreMetadataConfiguration{ - Mode: aws.String(data[names.AttrMode].(string)), - } - - if v, ok := data[names.AttrIOPS].(int); ok && v != 0 { - req.Iops = aws.Int64(int64(v)) - } - - return req -} - -func flattenLustreMetadataConfiguration(adopts *fsx.FileSystemLustreMetadataConfiguration) []map[string]interface{} { - if adopts == nil { - return []map[string]interface{}{} - } - - m := map[string]interface{}{ - names.AttrMode: aws.StringValue(adopts.Mode), - } - - if adopts.Iops != nil { - m[names.AttrIOPS] = aws.Int64Value(adopts.Iops) - } - - return []map[string]interface{}{m} -} - -func logStateFunc(v interface{}) string { - value := v.(string) - // API returns the specific log stream arn instead of provided log group - logArn, _ := arn.Parse(value) - if logArn.Service == "logs" { - parts := strings.SplitN(logArn.Resource, ":", 3) - if len(parts) == 3 { - return strings.TrimSuffix(value, fmt.Sprintf(":%s", parts[2])) - } else { - return value - } - } - return value -} - -func findLustreFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, 
error) { - output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeLustre) +func findLustreFileSystemByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.FileSystem, error) { + output, err := findFileSystemByIDAndType(ctx, conn, id, awstypes.FileSystemTypeLustre) if err != nil { return nil, err @@ -838,67 +705,64 @@ func findLustreFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*f return output, nil } -func findFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { +func findFileSystemByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.FileSystem, error) { input := &fsx.DescribeFileSystemsInput{ - FileSystemIds: aws.StringSlice([]string{id}), + FileSystemIds: []string{id}, } - return findFileSystem(ctx, conn, input, tfslices.PredicateTrue[*fsx.FileSystem]()) + return findFileSystem(ctx, conn, input, tfslices.PredicateTrue[*awstypes.FileSystem]()) } -func findFileSystemByIDAndType(ctx context.Context, conn *fsx.FSx, fsID, fsType string) (*fsx.FileSystem, error) { +func findFileSystemByIDAndType(ctx context.Context, conn *fsx.Client, fsID string, fsType awstypes.FileSystemType) (*awstypes.FileSystem, error) { input := &fsx.DescribeFileSystemsInput{ - FileSystemIds: aws.StringSlice([]string{fsID}), + FileSystemIds: []string{fsID}, } - filter := func(fs *fsx.FileSystem) bool { - return aws.StringValue(fs.FileSystemType) == fsType + filter := func(v *awstypes.FileSystem) bool { + return v.FileSystemType == fsType } return findFileSystem(ctx, conn, input, filter) } -func findFileSystem(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeFileSystemsInput, filter tfslices.Predicate[*fsx.FileSystem]) (*fsx.FileSystem, error) { +func findFileSystem(ctx context.Context, conn *fsx.Client, input *fsx.DescribeFileSystemsInput, filter tfslices.Predicate[*awstypes.FileSystem]) (*awstypes.FileSystem, error) { output, err := findFileSystems(ctx, conn, input, filter) if err != nil { - return nil, err + return &awstypes.FileSystem{}, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findFileSystems(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeFileSystemsInput, filter tfslices.Predicate[*fsx.FileSystem]) ([]*fsx.FileSystem, error) { - var output []*fsx.FileSystem +func findFileSystems(ctx context.Context, conn *fsx.Client, input *fsx.DescribeFileSystemsInput, filter tfslices.Predicate[*awstypes.FileSystem]) ([]awstypes.FileSystem, error) { + var output []awstypes.FileSystem - err := conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.FileSystems { - if v != nil && filter(v) { - output = append(output, v) + if errs.IsA[*awstypes.FileSystemNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.FileSystems { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func statusFileSystem(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { +func 
statusFileSystem(ctx context.Context, conn *fsx.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findFileSystemByID(ctx, conn, id) @@ -910,14 +774,14 @@ func statusFileSystem(ctx context.Context, conn *fsx.FSx, id string) retry.State return nil, "", err } - return output, aws.StringValue(output.Lifecycle), nil + return output, string(output.Lifecycle), nil } } -func waitFileSystemCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileSystem, error) { //nolint:unparam +func waitFileSystemCreated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.FileSystem, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.FileSystemLifecycleCreating}, - Target: []string{fsx.FileSystemLifecycleAvailable}, + Pending: enum.Slice(awstypes.FileSystemLifecycleCreating), + Target: enum.Slice(awstypes.FileSystemLifecycleAvailable), Refresh: statusFileSystem(ctx, conn, id), Timeout: timeout, Delay: 30 * time.Second, @@ -928,9 +792,9 @@ func waitFileSystemCreated(ctx context.Context, conn *fsx.FSx, id string, timeou outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.FileSystem); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileSystemLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(details.Message))) + if output, ok := outputRaw.(*awstypes.FileSystem); ok { + if status, details := output.Lifecycle, output.FailureDetails; status == awstypes.FileSystemLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(details.Message))) } return output, err @@ -939,10 +803,10 @@ func waitFileSystemCreated(ctx context.Context, conn *fsx.FSx, id string, timeou return nil, err } -func waitFileSystemUpdated(ctx context.Context, conn *fsx.FSx, id string, startTime time.Time, timeout time.Duration) (*fsx.FileSystem, error) { //nolint:unparam +func waitFileSystemUpdated(ctx context.Context, conn *fsx.Client, id string, startTime time.Time, timeout time.Duration) (*awstypes.FileSystem, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.FileSystemLifecycleUpdating}, - Target: []string{fsx.FileSystemLifecycleAvailable}, + Pending: enum.Slice(awstypes.FileSystemLifecycleUpdating), + Target: enum.Slice(awstypes.FileSystemLifecycleAvailable), Refresh: statusFileSystem(ctx, conn, id), Timeout: timeout, Delay: 30 * time.Second, @@ -950,20 +814,20 @@ func waitFileSystemUpdated(ctx context.Context, conn *fsx.FSx, id string, startT outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.FileSystem); ok { - switch status := aws.StringValue(output.Lifecycle); status { - case fsx.FileSystemLifecycleFailed, fsx.FileSystemLifecycleMisconfigured, fsx.FileSystemLifecycleMisconfiguredUnavailable: + if output, ok := outputRaw.(*awstypes.FileSystem); ok { + switch status := output.Lifecycle; status { + case awstypes.FileSystemLifecycleFailed, awstypes.FileSystemLifecycleMisconfigured, awstypes.FileSystemLifecycleMisconfiguredUnavailable: // Report any failed non-FILE_SYSTEM_UPDATE administrative actions. // See https://docs.aws.amazon.com/fsx/latest/APIReference/API_AdministrativeAction.html#FSx-Type-AdministrativeAction-AdministrativeActionType. 
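The find/status/wait hunks above capture the three recurring moves of this SDK v1 to v2 migration: the callback-based `DescribeFileSystemsPagesWithContext` becomes an explicit paginator, error-code string matching (`tfawserr.ErrCodeEquals`) becomes typed error matching, and `Lifecycle` becomes a typed enum instead of a `*string`. A minimal self-contained sketch of those patterns against the public SDK only; the `describeAll` name and the `config.LoadDefaultConfig` scaffolding are illustrative rather than part of this change, and the provider's internal `errs.IsA` helper is approximated here with plain `errors.As`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

// describeAll mirrors the shape of the migrated findFileSystems: an explicit
// paginator replaces the v1 ...PagesWithContext callback, and "not found" is
// detected by error type rather than by error-code string.
func describeAll(ctx context.Context, conn *fsx.Client, input *fsx.DescribeFileSystemsInput) ([]awstypes.FileSystem, error) {
	var output []awstypes.FileSystem

	pages := fsx.NewDescribeFileSystemsPaginator(conn, input)
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)

		// v1 equivalent: tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound).
		var nfe *awstypes.FileSystemNotFound
		if errors.As(err, &nfe) {
			return nil, fmt.Errorf("no matching FSx file system: %w", err)
		}
		if err != nil {
			return nil, err
		}

		output = append(output, page.FileSystems...)
	}

	return output, nil
}

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	fileSystems, err := describeAll(ctx, fsx.NewFromConfig(cfg), &fsx.DescribeFileSystemsInput{})
	if err != nil {
		log.Fatal(err)
	}

	for _, fs := range fileSystems {
		// Lifecycle is a typed enum (awstypes.FileSystemLifecycle), not *string,
		// so it prints directly and compares with ==, as in statusFileSystem above.
		fmt.Printf("%s: %s\n", aws.ToString(fs.FileSystemId), fs.Lifecycle)
	}
}
```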
- administrativeActions := tfslices.Filter(output.AdministrativeActions, func(v *fsx.AdministrativeAction) bool { - return v != nil && aws.StringValue(v.Status) == fsx.StatusFailed && aws.StringValue(v.AdministrativeActionType) != fsx.AdministrativeActionTypeFileSystemUpdate && v.FailureDetails != nil && startTime.Before(aws.TimeValue(v.RequestTime)) + administrativeActions := tfslices.Filter(output.AdministrativeActions, func(v awstypes.AdministrativeAction) bool { + return v.Status == awstypes.StatusFailed && v.AdministrativeActionType != awstypes.AdministrativeActionTypeFileSystemUpdate && v.FailureDetails != nil && startTime.Before(aws.ToTime(v.RequestTime)) }) - administrativeActionsError := errors.Join(tfslices.ApplyToAll(administrativeActions, func(v *fsx.AdministrativeAction) error { - return fmt.Errorf("%s: %s", aws.StringValue(v.AdministrativeActionType), aws.StringValue(v.FailureDetails.Message)) + administrativeActionsError := errors.Join(tfslices.ApplyToAll(administrativeActions, func(v awstypes.AdministrativeAction) error { + return fmt.Errorf("%s: %s", string(v.AdministrativeActionType), aws.ToString(v.FailureDetails.Message)) })...) if details := output.FailureDetails; details != nil { - if message := aws.StringValue(details.Message); administrativeActionsError != nil { + if message := aws.ToString(details.Message); administrativeActionsError != nil { tfresource.SetLastError(err, fmt.Errorf("%s: %w", message, administrativeActionsError)) } else { tfresource.SetLastError(err, errors.New(message)) @@ -979,9 +843,9 @@ func waitFileSystemUpdated(ctx context.Context, conn *fsx.FSx, id string, startT return nil, err } -func waitFileSystemDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.FileSystem, error) { //nolint:unparam +func waitFileSystemDeleted(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.FileSystem, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.FileSystemLifecycleAvailable, fsx.FileSystemLifecycleDeleting}, + Pending: enum.Slice(awstypes.FileSystemLifecycleAvailable, awstypes.FileSystemLifecycleDeleting), Target: []string{}, Refresh: statusFileSystem(ctx, conn, id), Timeout: timeout, @@ -990,9 +854,9 @@ func waitFileSystemDeleted(ctx context.Context, conn *fsx.FSx, id string, timeou outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.FileSystem); ok { - if status, details := aws.StringValue(output.Lifecycle), output.FailureDetails; status == fsx.FileSystemLifecycleFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(details.Message))) + if output, ok := outputRaw.(*awstypes.FileSystem); ok { + if status, details := output.Lifecycle, output.FailureDetails; status == awstypes.FileSystemLifecycleFailed && details != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(details.Message))) } return output, err @@ -1001,28 +865,24 @@ func waitFileSystemDeleted(ctx context.Context, conn *fsx.FSx, id string, timeou return nil, err } -func findFileSystemAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) (*fsx.AdministrativeAction, error) { +func findFileSystemAdministrativeAction(ctx context.Context, conn *fsx.Client, fsID string, actionType awstypes.AdministrativeActionType) (awstypes.AdministrativeAction, error) { output, err := findFileSystemByID(ctx, conn, fsID) if err != nil { - return nil, err + return awstypes.AdministrativeAction{}, err } for _, v := range 
output.AdministrativeActions { -		if v == nil { -			continue -		} - -		if aws.StringValue(v.AdministrativeActionType) == actionType { +		if v.AdministrativeActionType == actionType { return v, nil } } // If the administrative action isn't found, assume it's complete. -	return &fsx.AdministrativeAction{Status: aws.String(fsx.StatusCompleted)}, nil +	return awstypes.AdministrativeAction{Status: awstypes.StatusCompleted}, nil } -func statusFileSystemAdministrativeAction(ctx context.Context, conn *fsx.FSx, fsID, actionType string) retry.StateRefreshFunc { +func statusFileSystemAdministrativeAction(ctx context.Context, conn *fsx.Client, fsID string, actionType awstypes.AdministrativeActionType) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findFileSystemAdministrativeAction(ctx, conn, fsID, actionType) @@ -1034,14 +894,14 @@ func statusFileSystemAdministrativeAction(ctx context.Context, conn *fsx.FSx, fs return nil, "", err } -		return output, aws.StringValue(output.Status), nil +		return output, string(output.Status), nil } } -func waitFileSystemAdministrativeActionCompleted(ctx context.Context, conn *fsx.FSx, fsID, actionType string, timeout time.Duration) (*fsx.AdministrativeAction, error) { //nolint:unparam +func waitFileSystemAdministrativeActionCompleted(ctx context.Context, conn *fsx.Client, fsID string, actionType awstypes.AdministrativeActionType, timeout time.Duration) (*awstypes.AdministrativeAction, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ -		Pending: []string{fsx.StatusInProgress, fsx.StatusPending}, -		Target: []string{fsx.StatusCompleted, fsx.StatusUpdatedOptimizing}, +		Pending: enum.Slice(awstypes.StatusInProgress, awstypes.StatusPending), +		Target: enum.Slice(awstypes.StatusCompleted, awstypes.StatusUpdatedOptimizing), Refresh: statusFileSystemAdministrativeAction(ctx, conn, fsID, actionType), Timeout: timeout, Delay: 30 * time.Second, @@ -1049,9 +909,9 @@ outputRaw, err := stateConf.WaitForStateContext(ctx) -	if output, ok := outputRaw.(*fsx.AdministrativeAction); ok { -		if status, details := aws.StringValue(output.Status), output.FailureDetails; status == fsx.StatusFailed && details != nil { -			tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) +	if output, ok := outputRaw.(awstypes.AdministrativeAction); ok { +		if status, details := output.Status, output.FailureDetails; status == awstypes.StatusFailed && details != nil { +			tfresource.SetLastError(err, errors.New(aws.ToString(output.FailureDetails.Message))) } -		return output, err +		return &output, err @@ -1059,3 +919,138 @@ func waitFileSystemAdministrativeActionCompleted(ctx context.Context, conn *fsx.
return nil, err } + +func expandLustreRootSquashConfiguration(l []interface{}) *awstypes.LustreRootSquashConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &awstypes.LustreRootSquashConfiguration{} + + if v, ok := data["root_squash"].(string); ok && v != "" { + req.RootSquash = aws.String(v) + } + + if v, ok := data["no_squash_nids"].(*schema.Set); ok && v.Len() > 0 { + req.NoSquashNids = flex.ExpandStringValueSet(v) + } + + return req +} + +func flattenLustreRootSquashConfiguration(adopts *awstypes.LustreRootSquashConfiguration) []map[string]interface{} { + if adopts == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{} + + if adopts.RootSquash != nil { + m["root_squash"] = aws.ToString(adopts.RootSquash) + } + + if adopts.NoSquashNids != nil { + m["no_squash_nids"] = flex.FlattenStringValueSet(adopts.NoSquashNids) + } + + return []map[string]interface{}{m} +} + +func expandLustreLogCreateConfiguration(l []interface{}) *awstypes.LustreLogCreateConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &awstypes.LustreLogCreateConfiguration{ + Level: awstypes.LustreAccessAuditLogLevel(data["level"].(string)), + } + + if v, ok := data[names.AttrDestination].(string); ok && v != "" { + req.Destination = aws.String(logStateFunc(v)) + } + + return req +} + +func flattenLustreLogConfiguration(adopts *awstypes.LustreLogConfiguration) []map[string]interface{} { + if adopts == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "level": string(adopts.Level), + } + + if adopts.Destination != nil { + m[names.AttrDestination] = aws.ToString(adopts.Destination) + } + + return []map[string]interface{}{m} +} + +func expandLustreMetadataCreateConfiguration(l []interface{}) *awstypes.CreateFileSystemLustreMetadataConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &awstypes.CreateFileSystemLustreMetadataConfiguration{ + Mode: awstypes.MetadataConfigurationMode(data[names.AttrMode].(string)), + } + + if v, ok := data[names.AttrIOPS].(int); ok && v != 0 { + req.Iops = aws.Int32(int32(v)) + } + + return req +} + +func expandLustreMetadataUpdateConfiguration(l []interface{}) *awstypes.UpdateFileSystemLustreMetadataConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + data := l[0].(map[string]interface{}) + req := &awstypes.UpdateFileSystemLustreMetadataConfiguration{ + Mode: awstypes.MetadataConfigurationMode(data[names.AttrMode].(string)), + } + + if v, ok := data[names.AttrIOPS].(int); ok && v != 0 { + req.Iops = aws.Int32(int32(v)) + } + + return req +} + +func flattenLustreMetadataConfiguration(adopts *awstypes.FileSystemLustreMetadataConfiguration) []map[string]interface{} { + if adopts == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + names.AttrMode: string(adopts.Mode), + } + + if adopts.Iops != nil { + m[names.AttrIOPS] = aws.ToInt32(adopts.Iops) + } + + return []map[string]interface{}{m} +} + +func logStateFunc(v interface{}) string { + value := v.(string) + // API returns the specific log stream arn instead of provided log group + logArn, _ := arn.Parse(value) + if logArn.Service == "logs" { + parts := strings.SplitN(logArn.Resource, ":", 3) + if len(parts) == 3 { + return strings.TrimSuffix(value, fmt.Sprintf(":%s", parts[2])) + } else { + return value + } + } + return value +} diff --git 
a/internal/service/fsx/lustre_file_system_test.go b/internal/service/fsx/lustre_file_system_test.go index 89f1ec2f64ff..7f5029f0c576 100644 --- a/internal/service/fsx/lustre_file_system_test.go +++ b/internal/service/fsx/lustre_file_system_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/service/fsx" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -24,16 +24,16 @@ import ( func TestAccFSxLustreFileSystem_basic(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - deploymentType := fsx.LustreDeploymentTypeScratch1 + deploymentType := awstypes.LustreDeploymentTypeScratch1 if acctest.Partition() == endpoints.AwsUsGovPartitionID { - deploymentType = fsx.LustreDeploymentTypeScratch2 // SCRATCH_1 not supported in GovCloud + deploymentType = awstypes.LustreDeploymentTypeScratch2 // SCRATCH_1 not supported in GovCloud } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -45,8 +45,8 @@ func TestAccFSxLustreFileSystem_basic(t *testing.T) { acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "fsx", regexache.MustCompile(`file-system/fs-.+`)), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeNone), - resource.TestCheckResourceAttr(resourceName, "deployment_type", deploymentType), + resource.TestCheckResourceAttr(resourceName, "data_compression_type", string(awstypes.DataCompressionTypeNone)), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(deploymentType)), resource.TestMatchResourceAttr(resourceName, names.AttrDNSName, regexache.MustCompile(`fs-.+\.fsx\.`)), resource.TestCheckResourceAttr(resourceName, "export_path", ""), resource.TestCheckResourceAttr(resourceName, "import_path", ""), @@ -60,7 +60,7 @@ func TestAccFSxLustreFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1200"), - resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, fsx.StorageTypeSsd), + resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, string(awstypes.StorageTypeSsd)), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), resource.TestMatchResourceAttr(resourceName, names.AttrVPCID, 
regexache.MustCompile(`^vpc-.+`)), @@ -83,12 +83,12 @@ func TestAccFSxLustreFileSystem_basic(t *testing.T) { func TestAccFSxLustreFileSystem_disappears(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -107,12 +107,12 @@ func TestAccFSxLustreFileSystem_disappears(t *testing.T) { func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -121,7 +121,7 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { Config: testAccLustreFileSystemConfig_compression(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeLz4), + resource.TestCheckResourceAttr(resourceName, "data_compression_type", string(awstypes.DataCompressionTypeLz4)), ), }, { @@ -138,14 +138,14 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { Config: testAccLustreFileSystemConfig_basic(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeNone), + resource.TestCheckResourceAttr(resourceName, "data_compression_type", string(awstypes.DataCompressionTypeNone)), ), }, { Config: testAccLustreFileSystemConfig_compression(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "data_compression_type", fsx.DataCompressionTypeLz4), + resource.TestCheckResourceAttr(resourceName, "data_compression_type", string(awstypes.DataCompressionTypeLz4)), ), }, }, @@ -154,14 +154,14 @@ func TestAccFSxLustreFileSystem_dataCompression(t *testing.T) { func TestAccFSxLustreFileSystem_deleteConfig(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) acctest.SkipIfEnvVarNotSet(t, "AWS_FSX_CREATE_FINAL_BACKUP") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -192,12 +192,12 @@ func TestAccFSxLustreFileSystem_deleteConfig(t *testing.T) { func TestAccFSxLustreFileSystem_exportPath(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -236,12 +236,12 @@ func TestAccFSxLustreFileSystem_exportPath(t *testing.T) { // lintignore: AT002 func TestAccFSxLustreFileSystem_importPath(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -278,12 +278,12 @@ func TestAccFSxLustreFileSystem_importPath(t *testing.T) { // lintignore: AT002 func TestAccFSxLustreFileSystem_importedFileChunkSize(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -319,12 +319,12 @@ func TestAccFSxLustreFileSystem_importedFileChunkSize(t *testing.T) { func TestAccFSxLustreFileSystem_securityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -360,12 +360,12 @@ func 
TestAccFSxLustreFileSystem_securityGroupIDs(t *testing.T) { func TestAccFSxLustreFileSystem_storageCapacity(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -401,12 +401,12 @@ func TestAccFSxLustreFileSystem_storageCapacity(t *testing.T) { func TestAccFSxLustreFileSystem_storageCapacityUpdate(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2, filesystem3 fsx.FileSystem + var filesystem1, filesystem2, filesystem3 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -450,12 +450,12 @@ func TestAccFSxLustreFileSystem_storageCapacityUpdate(t *testing.T) { func TestAccFSxLustreFileSystem_fileSystemTypeVersion(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -491,12 +491,12 @@ func TestAccFSxLustreFileSystem_fileSystemTypeVersion(t *testing.T) { func TestAccFSxLustreFileSystem_tags(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2, filesystem3 fsx.FileSystem + var filesystem1, filesystem2, filesystem3 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -544,12 +544,12 @@ func TestAccFSxLustreFileSystem_tags(t *testing.T) { func TestAccFSxLustreFileSystem_weeklyMaintenanceStartTime(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 
awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -585,12 +585,12 @@ func TestAccFSxLustreFileSystem_weeklyMaintenanceStartTime(t *testing.T) { func TestAccFSxLustreFileSystem_automaticBackupRetentionDays(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -633,12 +633,12 @@ func TestAccFSxLustreFileSystem_automaticBackupRetentionDays(t *testing.T) { func TestAccFSxLustreFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -674,12 +674,12 @@ func TestAccFSxLustreFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { func TestAccFSxLustreFileSystem_deploymentTypePersistent1(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -690,7 +690,7 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1(t *testing.T) { testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), // per_unit_storage_throughput=50 is only available with deployment_type=PERSISTENT_1, so we test both here. 
resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "50"), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.LustreDeploymentTypePersistent1)), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", acctest.Ct0), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrKMSKeyID, "kms", regexache.MustCompile(`key/.+`)), // We don't know the randomly generated mount_name ahead of time like for SCRATCH_1 deployment types. @@ -713,12 +713,12 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1(t *testing.T) { func TestAccFSxLustreFileSystem_deploymentTypePersistent1_perUnitStorageThroughput(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -729,7 +729,7 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1_perUnitStorageThroughp testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), // per_unit_storage_throughput=50 is only available with deployment_type=PERSISTENT_1, so we test both here. resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "50"), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.LustreDeploymentTypePersistent1)), ), }, { @@ -756,12 +756,12 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent1_perUnitStorageThroughp func TestAccFSxLustreFileSystem_deploymentTypePersistent2(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -772,7 +772,7 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2(t *testing.T) { testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), // per_unit_storage_throughput=125 is only available with deployment_type=PERSISTENT_2, so we test both here. 
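These acceptance-test hunks repeat a single mechanical change: v1 exposed constants such as `fsx.LustreDeploymentTypePersistent1` as untyped strings, while the v2 enums are distinct named string types, so an explicit `string(...)` conversion is needed wherever the test framework expects a plain string. A small illustrative sketch of the type difference (standalone, not taken from this diff):

```go
package main

import (
	"fmt"

	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

func main() {
	// v2: a distinct named type, not an untyped string constant as in v1.
	deploymentType := awstypes.LustreDeploymentTypePersistent1

	// Passing it where a plain string is required (for example Terraform's
	// resource.TestCheckResourceAttr) needs an explicit conversion;
	// `var s string = deploymentType` would not compile.
	s := string(deploymentType)
	fmt.Println(s) // PERSISTENT_1

	// v2 enum types also enumerate their own values, which is what the
	// provider's enum.Slice and enum.Validate helpers build on.
	for _, v := range deploymentType.Values() {
		fmt.Println(v)
	}
}
```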
resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "125"), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.LustreDeploymentTypePersistent2)), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", acctest.Ct0), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrKMSKeyID, "kms", regexache.MustCompile(`key/.+`)), // We don't know the randomly generated mount_name ahead of time like for SCRATCH_1 deployment types. @@ -795,12 +795,12 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2(t *testing.T) { func TestAccFSxLustreFileSystem_deploymentTypePersistent2_perUnitStorageThroughput(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -811,7 +811,7 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2_perUnitStorageThroughp testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), // per_unit_storage_throughput=125 is only available with deployment_type=PERSISTENT_2, so we test both here. resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "125"), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.LustreDeploymentTypePersistent2)), ), }, { @@ -838,12 +838,12 @@ func TestAccFSxLustreFileSystem_deploymentTypePersistent2_perUnitStorageThroughp func TestAccFSxLustreFileSystem_logConfig(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -882,12 +882,12 @@ func TestAccFSxLustreFileSystem_logConfig(t *testing.T) { func TestAccFSxLustreFileSystem_metadataConfig(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, 
names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -924,12 +924,12 @@ func TestAccFSxLustreFileSystem_metadataConfig(t *testing.T) { func TestAccFSxLustreFileSystem_metadataConfig_increase(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -965,12 +965,12 @@ func TestAccFSxLustreFileSystem_metadataConfig_increase(t *testing.T) { func TestAccFSxLustreFileSystem_metadataConfig_decrease(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -1006,12 +1006,12 @@ func TestAccFSxLustreFileSystem_metadataConfig_decrease(t *testing.T) { func TestAccFSxLustreFileSystem_rootSquashConfig(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -1048,12 +1048,12 @@ func TestAccFSxLustreFileSystem_rootSquashConfig(t *testing.T) { func TestAccFSxLustreFileSystem_fromBackup(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -1063,7 +1063,7 @@ func TestAccFSxLustreFileSystem_fromBackup(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, 
&filesystem), resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "50"), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.LustreDeploymentTypePersistent1)), resource.TestCheckResourceAttrPair(resourceName, "backup_id", "aws_fsx_backup.test", names.AttrID), ), }, @@ -1083,14 +1083,14 @@ func TestAccFSxLustreFileSystem_fromBackup(t *testing.T) { func TestAccFSxLustreFileSystem_kmsKeyID(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" kmsKeyResourceName1 := "aws_kms_key.test1" kmsKeyResourceName2 := "aws_kms_key.test2" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -1099,7 +1099,7 @@ func TestAccFSxLustreFileSystem_kmsKeyID(t *testing.T) { Config: testAccLustreFileSystemConfig_kmsKeyID1(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem1), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.LustreDeploymentTypePersistent1)), resource.TestCheckResourceAttrPair(resourceName, names.AttrKMSKeyID, kmsKeyResourceName1, names.AttrARN), ), }, @@ -1117,7 +1117,7 @@ func TestAccFSxLustreFileSystem_kmsKeyID(t *testing.T) { Config: testAccLustreFileSystemConfig_kmsKeyID2(rName), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem2), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.LustreDeploymentTypePersistent1)), testAccCheckLustreFileSystemRecreated(&filesystem1, &filesystem2), resource.TestCheckResourceAttrPair(resourceName, names.AttrKMSKeyID, kmsKeyResourceName2, names.AttrARN), ), @@ -1128,21 +1128,21 @@ func TestAccFSxLustreFileSystem_kmsKeyID(t *testing.T) { func TestAccFSxLustreFileSystem_deploymentTypeScratch2(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_deploymentType(rName, fsx.LustreDeploymentTypeScratch2), + Config: testAccLustreFileSystemConfig_deploymentType(rName, string(awstypes.LustreDeploymentTypeScratch2)), 
Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypeScratch2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.LustreDeploymentTypeScratch2)), // We don't know the randomly generated mount_name ahead of time like for SCRATCH_1 deployment types. resource.TestCheckResourceAttrSet(resourceName, "mount_name"), ), @@ -1163,22 +1163,22 @@ func TestAccFSxLustreFileSystem_deploymentTypeScratch2(t *testing.T) { func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheRead(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_hddStorageType(rName, fsx.DriveCacheTypeRead), + Config: testAccLustreFileSystemConfig_hddStorageType(rName, string(awstypes.DriveCacheTypeRead)), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, fsx.StorageTypeHdd), - resource.TestCheckResourceAttr(resourceName, "drive_cache_type", fsx.DriveCacheTypeRead), + resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, string(awstypes.StorageTypeHdd)), + resource.TestCheckResourceAttr(resourceName, "drive_cache_type", string(awstypes.DriveCacheTypeRead)), ), }, { @@ -1197,22 +1197,22 @@ func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheRead(t *testing.T) { func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheNone(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLustreFileSystemConfig_hddStorageType(rName, fsx.DriveCacheTypeNone), + Config: testAccLustreFileSystemConfig_hddStorageType(rName, string(awstypes.DriveCacheTypeNone)), Check: resource.ComposeTestCheckFunc( testAccCheckLustreFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, fsx.StorageTypeHdd), - resource.TestCheckResourceAttr(resourceName, "drive_cache_type", fsx.DriveCacheTypeNone), + resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, string(awstypes.StorageTypeHdd)), + resource.TestCheckResourceAttr(resourceName, "drive_cache_type", string(awstypes.DriveCacheTypeNone)), ), 
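The helper rewrites later in this file swap the v1 `FSxConn` getter for the v2 `FSxClient` while leaving the retry/NotFound plumbing untouched. A condensed sketch of the destroy-check pattern those hunks migrate; it assumes the provider-internal packages (`acctest`, `conns`, `tffsx`, `tfresource`) that the diff itself imports, and the function name here is hypothetical:

```go
package fsx_test

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/hashicorp/terraform-plugin-testing/terraform"
	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
	"github.com/hashicorp/terraform-provider-aws/internal/conns"
	tffsx "github.com/hashicorp/terraform-provider-aws/internal/service/fsx"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
)

func testAccCheckLustreFileSystemDestroySketch(ctx context.Context) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// v2 migration: FSxClient(ctx) returns *fsx.Client where FSxConn(ctx)
		// returned the v1 *fsx.FSx.
		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)

		for _, rs := range s.RootModule().Resources {
			if rs.Type != "aws_fsx_lustre_file_system" {
				continue
			}

			_, err := tffsx.FindLustreFileSystemByID(ctx, conn, rs.Primary.ID)

			if tfresource.NotFound(err) {
				continue // deleted, which is what CheckDestroy expects
			}
			if err != nil {
				return err
			}

			return fmt.Errorf("FSx for Lustre File System still exists: %s", rs.Primary.ID)
		}

		return nil
	}
}
```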
}, { @@ -1231,12 +1231,12 @@ func TestAccFSxLustreFileSystem_storageTypeHddDriveCacheNone(t *testing.T) { func TestAccFSxLustreFileSystem_copyTagsToBackups(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -1264,12 +1264,12 @@ func TestAccFSxLustreFileSystem_copyTagsToBackups(t *testing.T) { func TestAccFSxLustreFileSystem_autoImportPolicy(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_lustre_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -1302,14 +1302,14 @@ func TestAccFSxLustreFileSystem_autoImportPolicy(t *testing.T) { }) } -func testAccCheckLustreFileSystemExists(ctx context.Context, n string, v *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckLustreFileSystemExists(ctx context.Context, n string, v *awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) output, err := tffsx.FindLustreFileSystemByID(ctx, conn, rs.Primary.ID) @@ -1325,7 +1325,7 @@ func testAccCheckLustreFileSystemExists(ctx context.Context, n string, v *fsx.Fi func testAccCheckLustreFileSystemDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_fsx_lustre_file_system" { @@ -1349,20 +1349,20 @@ func testAccCheckLustreFileSystemDestroy(ctx context.Context) resource.TestCheck } } -func testAccCheckLustreFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckLustreFileSystemNotRecreated(i, j *awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx for Lustre File System (%s) recreated", aws.StringValue(i.FileSystemId)) + if aws.ToString(i.FileSystemId) != aws.ToString(j.FileSystemId) { + return fmt.Errorf("FSx for Lustre File System (%s) recreated", aws.ToString(i.FileSystemId)) } return nil } } -func testAccCheckLustreFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckLustreFileSystemRecreated(i, j 
*awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx for Lustre File System (%s) not recreated", aws.StringValue(i.FileSystemId)) + if aws.ToString(i.FileSystemId) == aws.ToString(j.FileSystemId) { + return fmt.Errorf("FSx for Lustre File System (%s) not recreated", aws.ToString(i.FileSystemId)) } return nil diff --git a/internal/service/fsx/ontap_file_system.go b/internal/service/fsx/ontap_file_system.go index 9ef217f7f436..a7b182196ce8 100644 --- a/internal/service/fsx/ontap_file_system.go +++ b/internal/service/fsx/ontap_file_system.go @@ -9,15 +9,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -66,10 +68,10 @@ func resourceONTAPFileSystem() *schema.Resource { ), }, "deployment_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(fsx.OntapDeploymentType_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.OntapDeploymentType](), }, "disk_iops_configuration": { Type: schema.TypeList, @@ -85,10 +87,10 @@ func resourceONTAPFileSystem() *schema.Resource { ValidateFunc: validation.IntBetween(0, 2400000), }, names.AttrMode: { - Type: schema.TypeString, - Optional: true, - Default: fsx.DiskIopsConfigurationModeAutomatic, - ValidateFunc: validation.StringInSlice(fsx.DiskIopsConfigurationMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DiskIopsConfigurationModeAutomatic, + ValidateDiagFunc: enum.Validate[awstypes.DiskIopsConfigurationMode](), }, }, }, @@ -201,11 +203,11 @@ func resourceONTAPFileSystem() *schema.Resource { ValidateFunc: validation.IntBetween(1024, 1024*1024), }, names.AttrStorageType: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: fsx.StorageTypeSsd, - ValidateFunc: validation.StringInSlice(fsx.StorageType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.StorageTypeSsd, + ValidateDiagFunc: enum.Validate[awstypes.StorageType](), }, names.AttrSubnetIDs: { Type: schema.TypeList, @@ -258,7 +260,7 @@ func resourceONTAPFileSystemThroughputCapacityPerHAPairCustomizeDiff(_ context.C // we want to force a new resource if the throughput_capacity_per_ha_pair is increased for Gen1 file systems if d.HasChange("throughput_capacity_per_ha_pair") { o, n := d.GetChange("throughput_capacity_per_ha_pair") - if n != nil && n.(int) != 0 && n.(int) > o.(int) && 
(d.Get("deployment_type").(string) == fsx.OntapDeploymentTypeSingleAz1 || d.Get("deployment_type").(string) == fsx.OntapDeploymentTypeMultiAz1) { + if n != nil && n.(int) != 0 && n.(int) > o.(int) && (d.Get("deployment_type").(string) == string(awstypes.OntapDeploymentTypeSingleAz1) || d.Get("deployment_type").(string) == string(awstypes.OntapDeploymentTypeMultiAz1)) { if err := d.ForceNew("throughput_capacity_per_ha_pair"); err != nil { return err } @@ -272,7 +274,7 @@ func resourceONTAPFileSystemHAPairsCustomizeDiff(_ context.Context, d *schema.Re // we want to force a new resource if the ha_pairs is increased for Gen1 single AZ file systems. multiple ha_pairs is not supported on Multi AZ. if d.HasChange("ha_pairs") { o, n := d.GetChange("ha_pairs") - if n != nil && n.(int) != 0 && n.(int) > o.(int) && (d.Get("deployment_type").(string) == fsx.OntapDeploymentTypeSingleAz1) { + if n != nil && n.(int) != 0 && n.(int) > o.(int) && (d.Get("deployment_type").(string) == string(awstypes.OntapDeploymentTypeSingleAz1)) { if err := d.ForceNew("ha_pairs"); err != nil { return err } @@ -284,19 +286,19 @@ func resourceONTAPFileSystemHAPairsCustomizeDiff(_ context.Context, d *schema.Re func resourceONTAPFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) input := &fsx.CreateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), - FileSystemType: aws.String(fsx.FileSystemTypeOntap), - OntapConfiguration: &fsx.CreateFileSystemOntapConfiguration{ - AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), - DeploymentType: aws.String(d.Get("deployment_type").(string)), + FileSystemType: awstypes.FileSystemTypeOntap, + OntapConfiguration: &awstypes.CreateFileSystemOntapConfiguration{ + AutomaticBackupRetentionDays: aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))), + DeploymentType: awstypes.OntapDeploymentType(d.Get("deployment_type").(string)), PreferredSubnetId: aws.String(d.Get("preferred_subnet_id").(string)), }, - StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), - StorageType: aws.String(d.Get(names.AttrStorageType).(string)), - SubnetIds: flex.ExpandStringList(d.Get(names.AttrSubnetIDs).([]interface{})), + StorageCapacity: aws.Int32(int32(d.Get("storage_capacity").(int))), + StorageType: awstypes.StorageType(d.Get(names.AttrStorageType).(string)), + SubnetIds: flex.ExpandStringValueList(d.Get(names.AttrSubnetIDs).([]interface{})), Tags: getTagsIn(ctx), } @@ -317,12 +319,12 @@ func resourceONTAPFileSystemCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("ha_pairs"); ok { - v := int64(v.(int)) - input.OntapConfiguration.HAPairs = aws.Int64(v) + v := int32(v.(int)) + input.OntapConfiguration.HAPairs = aws.Int32(v) if v > 0 { if v, ok := d.GetOk("throughput_capacity_per_ha_pair"); ok { - input.OntapConfiguration.ThroughputCapacityPerHAPair = aws.Int64(int64(v.(int))) + input.OntapConfiguration.ThroughputCapacityPerHAPair = aws.Int32(int32(v.(int))) } } } @@ -332,28 +334,28 @@ func resourceONTAPFileSystemCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("route_table_ids"); ok { - input.OntapConfiguration.RouteTableIds = flex.ExpandStringSet(v.(*schema.Set)) + input.OntapConfiguration.RouteTableIds = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk(names.AttrSecurityGroupIDs); ok { - 
input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("throughput_capacity"); ok { - input.OntapConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int))) + input.OntapConfiguration.ThroughputCapacity = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { input.OntapConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) } - output, err := conn.CreateFileSystemWithContext(ctx, input) + output, err := conn.CreateFileSystem(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx for NetApp ONTAP File System: %s", err) } - d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) + d.SetId(aws.ToString(output.FileSystem.FileSystemId)) if _, err := waitFileSystemCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP File System (%s) create: %s", d.Id(), err) @@ -364,7 +366,7 @@ func resourceONTAPFileSystemCreate(ctx context.Context, d *schema.ResourceData, func resourceONTAPFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) filesystem, err := findONTAPFileSystemByID(ctx, conn, d.Id()) @@ -393,17 +395,17 @@ func resourceONTAPFileSystemRead(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "setting endpoints: %s", err) } d.Set("fsx_admin_password", d.Get("fsx_admin_password").(string)) - haPairs := aws.Int64Value(ontapConfig.HAPairs) + haPairs := aws.ToInt32(ontapConfig.HAPairs) d.Set("ha_pairs", haPairs) d.Set(names.AttrKMSKeyID, filesystem.KmsKeyId) - d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)) + d.Set("network_interface_ids", filesystem.NetworkInterfaceIds) d.Set(names.AttrOwnerID, filesystem.OwnerId) d.Set("preferred_subnet_id", ontapConfig.PreferredSubnetId) - d.Set("route_table_ids", aws.StringValueSlice(ontapConfig.RouteTableIds)) + d.Set("route_table_ids", ontapConfig.RouteTableIds) d.Set("storage_capacity", filesystem.StorageCapacity) d.Set(names.AttrStorageType, filesystem.StorageType) - d.Set(names.AttrSubnetIDs, aws.StringValueSlice(filesystem.SubnetIds)) - if aws.StringValue(ontapConfig.DeploymentType) == fsx.OntapDeploymentTypeSingleAz2 { + d.Set(names.AttrSubnetIDs, filesystem.SubnetIds) + if ontapConfig.DeploymentType == awstypes.OntapDeploymentTypeSingleAz2 { d.Set("throughput_capacity", nil) d.Set("throughput_capacity_per_ha_pair", ontapConfig.ThroughputCapacityPerHAPair) } else { @@ -420,17 +422,17 @@ func resourceONTAPFileSystemRead(ctx context.Context, d *schema.ResourceData, me func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), - OntapConfiguration: &fsx.UpdateFileSystemOntapConfiguration{}, + OntapConfiguration: &awstypes.UpdateFileSystemOntapConfiguration{}, } if d.HasChange("automatic_backup_retention_days") { - input.OntapConfiguration.AutomaticBackupRetentionDays = 
aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))) + input.OntapConfiguration.AutomaticBackupRetentionDays = aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))) } if d.HasChange("daily_automatic_backup_start_time") { @@ -446,9 +448,9 @@ func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("ha_pairs") { - input.OntapConfiguration.HAPairs = aws.Int64(int64(d.Get("ha_pairs").(int))) + input.OntapConfiguration.HAPairs = aws.Int32(int32(d.Get("ha_pairs").(int))) //for the ONTAP update API the ThroughputCapacityPerHAPair must explicitly be passed when adding ha_pairs even if it hasn't changed. - input.OntapConfiguration.ThroughputCapacityPerHAPair = aws.Int64(int64(d.Get("throughput_capacity_per_ha_pair").(int))) + input.OntapConfiguration.ThroughputCapacityPerHAPair = aws.Int32(int32(d.Get("throughput_capacity_per_ha_pair").(int))) } if d.HasChange("route_table_ids") { @@ -457,24 +459,24 @@ func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, add, del := flex.ExpandStringValueSet(ns.Difference(os)), flex.ExpandStringValueSet(os.Difference(ns)) if len(add) > 0 { - input.OntapConfiguration.AddRouteTableIds = aws.StringSlice(add) + input.OntapConfiguration.AddRouteTableIds = add } if len(del) > 0 { - input.OntapConfiguration.RemoveRouteTableIds = aws.StringSlice(del) + input.OntapConfiguration.RemoveRouteTableIds = del } } if d.HasChange("storage_capacity") { - input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int))) + input.StorageCapacity = aws.Int32(int32(d.Get("storage_capacity").(int))) } if d.HasChange("throughput_capacity") { - input.OntapConfiguration.ThroughputCapacity = aws.Int64(int64(d.Get("throughput_capacity").(int))) + input.OntapConfiguration.ThroughputCapacity = aws.Int32(int32(d.Get("throughput_capacity").(int))) } if d.HasChange("throughput_capacity_per_ha_pair") { - input.OntapConfiguration.ThroughputCapacityPerHAPair = aws.Int64(int64(d.Get("throughput_capacity_per_ha_pair").(int))) + input.OntapConfiguration.ThroughputCapacityPerHAPair = aws.Int32(int32(d.Get("throughput_capacity_per_ha_pair").(int))) } if d.HasChange("weekly_maintenance_start_time") { @@ -482,7 +484,7 @@ func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, } startTime := time.Now() - _, err := conn.UpdateFileSystemWithContext(ctx, input) + _, err := conn.UpdateFileSystem(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for NetApp ONTAP File System (%s): %s", d.Id(), err) @@ -492,8 +494,8 @@ func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP File System (%s) update: %s", d.Id(), err) } - if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP File System (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, err) } } @@ -502,14 +504,14 @@ func 
resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, func resourceONTAPFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) log.Printf("[DEBUG] Deleting FSx for NetApp ONTAP File System: %s", d.Id()) - _, err := conn.DeleteFileSystemWithContext(ctx, &fsx.DeleteFileSystemInput{ + _, err := conn.DeleteFileSystem(ctx, &fsx.DeleteFileSystemInput{ FileSystemId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { + if errs.IsA[*awstypes.FileSystemNotFound](err) { return diags } @@ -524,17 +526,17 @@ func resourceONTAPFileSystemDelete(ctx context.Context, d *schema.ResourceData, return diags } -func expandOntapFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration { +func expandOntapFileDiskIopsConfiguration(cfg []interface{}) *awstypes.DiskIopsConfiguration { if len(cfg) < 1 { return nil } conf := cfg[0].(map[string]interface{}) - out := fsx.DiskIopsConfiguration{} + out := awstypes.DiskIopsConfiguration{} if v, ok := conf[names.AttrMode].(string); ok && len(v) > 0 { - out.Mode = aws.String(v) + out.Mode = awstypes.DiskIopsConfigurationMode(v) } if v, ok := conf[names.AttrIOPS].(int); ok { out.Iops = aws.Int64(int64(v)) @@ -543,23 +545,22 @@ func expandOntapFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfig return &out } -func flattenOntapFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { +func flattenOntapFileDiskIopsConfiguration(rs *awstypes.DiskIopsConfiguration) []interface{} { if rs == nil { return []interface{}{} } m := make(map[string]interface{}) - if rs.Mode != nil { - m[names.AttrMode] = aws.StringValue(rs.Mode) - } + m[names.AttrMode] = string(rs.Mode) + if rs.Iops != nil { - m[names.AttrIOPS] = aws.Int64Value(rs.Iops) + m[names.AttrIOPS] = aws.ToInt64(rs.Iops) } return []interface{}{m} } -func flattenOntapFileSystemEndpoints(rs *fsx.FileSystemEndpoints) []interface{} { +func flattenOntapFileSystemEndpoints(rs *awstypes.FileSystemEndpoints) []interface{} { if rs == nil { return []interface{}{} } @@ -575,24 +576,24 @@ func flattenOntapFileSystemEndpoints(rs *fsx.FileSystemEndpoints) []interface{} return []interface{}{m} } -func flattenOntapFileSystemEndpoint(rs *fsx.FileSystemEndpoint) []interface{} { +func flattenOntapFileSystemEndpoint(rs *awstypes.FileSystemEndpoint) []interface{} { if rs == nil { return []interface{}{} } m := make(map[string]interface{}) if rs.DNSName != nil { - m[names.AttrDNSName] = aws.StringValue(rs.DNSName) + m[names.AttrDNSName] = aws.ToString(rs.DNSName) } if rs.IpAddresses != nil { - m[names.AttrIPAddresses] = flex.FlattenStringSet(rs.IpAddresses) + m[names.AttrIPAddresses] = flex.FlattenStringValueSet(rs.IpAddresses) } return []interface{}{m} } -func findONTAPFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { - output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeOntap) +func findONTAPFileSystemByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.FileSystem, error) { + output, err := findFileSystemByIDAndType(ctx, conn, id, awstypes.FileSystemTypeOntap) if err != nil { return nil, err diff --git a/internal/service/fsx/ontap_file_system_data_source.go b/internal/service/fsx/ontap_file_system_data_source.go index cbbec1ce791d..d350efff9622 100644 --- a/internal/service/fsx/ontap_file_system_data_source.go +++ 
b/internal/service/fsx/ontap_file_system_data_source.go @@ -6,8 +6,8 @@ package fsx import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -173,7 +173,7 @@ func dataSourceONTAPFileSystem() *schema.Resource { func dataSourceONTAPFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) id := d.Get(names.AttrID).(string) filesystem, err := findONTAPFileSystemByID(ctx, conn, id) @@ -184,7 +184,7 @@ func dataSourceONTAPFileSystemRead(ctx context.Context, d *schema.ResourceData, ontapConfig := filesystem.OntapConfiguration - d.SetId(aws.StringValue(filesystem.FileSystemId)) + d.SetId(aws.ToString(filesystem.FileSystemId)) d.Set(names.AttrARN, filesystem.ResourceARN) d.Set("automatic_backup_retention_days", ontapConfig.AutomaticBackupRetentionDays) d.Set("daily_automatic_backup_start_time", ontapConfig.DailyAutomaticBackupStartTime) @@ -197,17 +197,17 @@ func dataSourceONTAPFileSystemRead(ctx context.Context, d *schema.ResourceData, if err := d.Set(names.AttrEndpoints, flattenOntapFileSystemEndpoints(ontapConfig.Endpoints)); err != nil { return sdkdiag.AppendErrorf(diags, "setting endpoints: %s", err) } - haPairs := aws.Int64Value(ontapConfig.HAPairs) + haPairs := aws.ToInt32(ontapConfig.HAPairs) d.Set("ha_pairs", haPairs) d.Set(names.AttrKMSKeyID, filesystem.KmsKeyId) - d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)) + d.Set("network_interface_ids", filesystem.NetworkInterfaceIds) d.Set(names.AttrOwnerID, filesystem.OwnerId) d.Set("preferred_subnet_id", ontapConfig.PreferredSubnetId) - d.Set("route_table_ids", aws.StringValueSlice(ontapConfig.RouteTableIds)) + d.Set("route_table_ids", ontapConfig.RouteTableIds) d.Set("storage_capacity", filesystem.StorageCapacity) d.Set(names.AttrStorageType, filesystem.StorageType) - d.Set(names.AttrSubnetIDs, aws.StringValueSlice(filesystem.SubnetIds)) - if aws.StringValue(ontapConfig.DeploymentType) == fsx.OntapDeploymentTypeSingleAz2 { + d.Set(names.AttrSubnetIDs, filesystem.SubnetIds) + if ontapConfig.DeploymentType == awstypes.OntapDeploymentTypeSingleAz2 { d.Set("throughput_capacity", nil) d.Set("throughput_capacity_per_ha_pair", ontapConfig.ThroughputCapacityPerHAPair) } else { diff --git a/internal/service/fsx/ontap_file_system_test.go b/internal/service/fsx/ontap_file_system_test.go index a67aa7a42fda..c6398ae970e0 100644 --- a/internal/service/fsx/ontap_file_system_test.go +++ b/internal/service/fsx/ontap_file_system_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,12 +23,12 @@ import ( func TestAccFSxONTAPFileSystem_basic(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := 
"aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -39,7 +39,7 @@ func TestAccFSxONTAPFileSystem_basic(t *testing.T) { testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "fsx", regexache.MustCompile(`file-system/fs-.+`)), resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OntapDeploymentTypeMultiAz1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.OntapDeploymentTypeMultiAz1)), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "3072"), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "AUTOMATIC"), @@ -58,7 +58,7 @@ func TestAccFSxONTAPFileSystem_basic(t *testing.T) { resource.TestCheckTypeSetElemAttrPair(resourceName, "route_table_ids.*", "aws_vpc.test", "default_route_table_id"), resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "1024"), - resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, fsx.StorageTypeSsd), + resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, string(awstypes.StorageTypeSsd)), resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.0", names.AttrID), resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.1", names.AttrID), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), @@ -80,12 +80,12 @@ func TestAccFSxONTAPFileSystem_basic(t *testing.T) { func TestAccFSxONTAPFileSystem_singleAZ(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -94,7 +94,7 @@ func TestAccFSxONTAPFileSystem_singleAZ(t *testing.T) { Config: testAccONTAPFileSystemConfig_singleAZ(rName), Check: resource.ComposeTestCheckFunc( testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OntapDeploymentTypeSingleAz1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.OntapDeploymentTypeSingleAz1)), ), }, { @@ -109,7 +109,7 @@ func TestAccFSxONTAPFileSystem_singleAZ(t *testing.T) { func TestAccFSxONTAPFileSystem_multiAZ2(t *testing.T) { ctx := 
acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem throughput1 := 384 throughput2 := 768 throughput3 := 768 @@ -121,7 +121,7 @@ func TestAccFSxONTAPFileSystem_multiAZ2(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -130,7 +130,7 @@ func TestAccFSxONTAPFileSystem_multiAZ2(t *testing.T) { Config: testAccONTAPFileSystemConfig_multiAZ2(rName, throughput1, capacity1), Check: resource.ComposeTestCheckFunc( testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem1), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OntapDeploymentTypeMultiAz2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.OntapDeploymentTypeMultiAz2)), resource.TestCheckResourceAttr(resourceName, "ha_pairs", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "throughput_capacity", fmt.Sprint(throughput1)), resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput1)), @@ -148,7 +148,7 @@ func TestAccFSxONTAPFileSystem_multiAZ2(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckONTAPFileSystemNotRecreated(&filesystem1, &filesystem2), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OntapDeploymentTypeMultiAz2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.OntapDeploymentTypeMultiAz2)), resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput2)), resource.TestCheckResourceAttr(resourceName, "storage_capacity", fmt.Sprint(capacity2)), ), @@ -158,7 +158,7 @@ func TestAccFSxONTAPFileSystem_multiAZ2(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem2), testAccCheckONTAPFileSystemNotRecreated(&filesystem1, &filesystem2), - resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OntapDeploymentTypeMultiAz2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.OntapDeploymentTypeMultiAz2)), resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput3)), resource.TestCheckResourceAttr(resourceName, "storage_capacity", fmt.Sprint(capacity3)), ), @@ -169,14 +169,14 @@ func TestAccFSxONTAPFileSystem_multiAZ2(t *testing.T) { func TestAccFSxONTAPFileSystem_haPair(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem throughput1 := 3072 throughput2 := 256 resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -211,7 +211,7 @@ func TestAccFSxONTAPFileSystem_haPair(t *testing.T) { func TestAccFSxONTAPFileSystem_haPair_increase(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem throughput := 3072 capacity1 := 4096 capacity2 := 8192 @@ -221,7 +221,7 @@ func TestAccFSxONTAPFileSystem_haPair_increase(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -259,14 +259,14 @@ func TestAccFSxONTAPFileSystem_haPair_increase(t *testing.T) { func TestAccFSxONTAPFileSystem_fsxAdminPassword(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) pass1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) pass2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -298,12 +298,12 @@ func TestAccFSxONTAPFileSystem_fsxAdminPassword(t *testing.T) { func TestAccFSxONTAPFileSystem_endpointIPAddressRange(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -327,12 +327,12 @@ func TestAccFSxONTAPFileSystem_endpointIPAddressRange(t *testing.T) { func TestAccFSxONTAPFileSystem_diskIOPS(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -367,12 +367,12 @@ func TestAccFSxONTAPFileSystem_diskIOPS(t 
*testing.T) { func TestAccFSxONTAPFileSystem_disappears(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -391,12 +391,12 @@ func TestAccFSxONTAPFileSystem_disappears(t *testing.T) { func TestAccFSxONTAPFileSystem_securityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -428,12 +428,12 @@ func TestAccFSxONTAPFileSystem_securityGroupIDs(t *testing.T) { func TestAccFSxONTAPFileSystem_routeTableIDs(t *testing.T) { ctx := acctest.Context(t) - var filesystem1 fsx.FileSystem + var filesystem1 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -475,12 +475,12 @@ func TestAccFSxONTAPFileSystem_routeTableIDs(t *testing.T) { func TestAccFSxONTAPFileSystem_tags(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2, filesystem3 fsx.FileSystem + var filesystem1, filesystem2, filesystem3 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -524,12 +524,12 @@ func TestAccFSxONTAPFileSystem_tags(t *testing.T) { func TestAccFSxONTAPFileSystem_weeklyMaintenanceStartTime(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - 
PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -561,12 +561,12 @@ func TestAccFSxONTAPFileSystem_weeklyMaintenanceStartTime(t *testing.T) { func TestAccFSxONTAPFileSystem_automaticBackupRetentionDays(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -605,12 +605,12 @@ func TestAccFSxONTAPFileSystem_automaticBackupRetentionDays(t *testing.T) { func TestAccFSxONTAPFileSystem_kmsKeyID(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -634,12 +634,12 @@ func TestAccFSxONTAPFileSystem_kmsKeyID(t *testing.T) { func TestAccFSxONTAPFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLustreFileSystemDestroy(ctx), @@ -671,12 +671,12 @@ func TestAccFSxONTAPFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { func TestAccFSxONTAPFileSystem_throughputCapacity(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -717,7 +717,7 @@ func TestAccFSxONTAPFileSystem_throughputCapacity(t *testing.T) { func TestAccFSxONTAPFileSystem_throughputCapacity_singleAZ1(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem throughput1 := 128 throughput2 := 256 capacity := 1024 @@ -725,7 +725,7 @@ func TestAccFSxONTAPFileSystem_throughputCapacity_singleAZ1(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -757,7 +757,7 @@ func TestAccFSxONTAPFileSystem_throughputCapacity_singleAZ1(t *testing.T) { func TestAccFSxONTAPFileSystem_throughputCapacity_multiAZ1(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem throughput1 := 128 throughput2 := 256 capacity := 1024 @@ -765,7 +765,7 @@ func TestAccFSxONTAPFileSystem_throughputCapacity_multiAZ1(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -797,12 +797,12 @@ func TestAccFSxONTAPFileSystem_throughputCapacity_multiAZ1(t *testing.T) { func TestAccFSxONTAPFileSystem_storageCapacity(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_ontap_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), @@ -832,14 +832,14 @@ func TestAccFSxONTAPFileSystem_storageCapacity(t *testing.T) { }) } -func testAccCheckONTAPFileSystemExists(ctx context.Context, n string, v *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckONTAPFileSystemExists(ctx context.Context, n string, v *awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) output, err := tffsx.FindONTAPFileSystemByID(ctx, conn, rs.Primary.ID) @@ -855,7 +855,7 @@ func testAccCheckONTAPFileSystemExists(ctx context.Context, n string, v 
*fsx.Fil func testAccCheckONTAPFileSystemDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_fsx_ontap_file_system" { @@ -879,20 +879,20 @@ func testAccCheckONTAPFileSystemDestroy(ctx context.Context) resource.TestCheckF } } -func testAccCheckONTAPFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckONTAPFileSystemNotRecreated(i, j *awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx for NetApp ONTAP File System (%s) recreated", aws.StringValue(i.FileSystemId)) + if aws.ToString(i.FileSystemId) != aws.ToString(j.FileSystemId) { + return fmt.Errorf("FSx for NetApp ONTAP File System (%s) recreated", aws.ToString(i.FileSystemId)) } return nil } } -func testAccCheckONTAPFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckONTAPFileSystemRecreated(i, j *awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx for NetApp ONTAP File System (%s) not recreated", aws.StringValue(i.FileSystemId)) + if aws.ToString(i.FileSystemId) == aws.ToString(j.FileSystemId) { + return fmt.Errorf("FSx for NetApp ONTAP File System (%s) not recreated", aws.ToString(i.FileSystemId)) } return nil diff --git a/internal/service/fsx/ontap_storage_virtual_machine.go b/internal/service/fsx/ontap_storage_virtual_machine.go index 178d2ee4e9f4..f06e7b92adbc 100644 --- a/internal/service/fsx/ontap_storage_virtual_machine.go +++ b/internal/service/fsx/ontap_storage_virtual_machine.go @@ -9,15 +9,17 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" @@ -210,10 +212,10 @@ func resourceONTAPStorageVirtualMachine() *schema.Resource { ValidateFunc: validation.StringLenBetween(1, 47), }, "root_volume_security_style": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(fsx.StorageVirtualMachineRootVolumeSecurityStyle_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.StorageVirtualMachineRootVolumeSecurityStyle](), }, "subtype": { Type: schema.TypeString, @@ -239,7 +241,7 @@ func resourceONTAPStorageVirtualMachine() *schema.Resource { func 
resourceONTAPStorageVirtualMachineCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) name := d.Get(names.AttrName).(string) input := &fsx.CreateStorageVirtualMachineInput{ @@ -253,20 +255,20 @@ func resourceONTAPStorageVirtualMachineCreate(ctx context.Context, d *schema.Res } if v, ok := d.GetOk("root_volume_security_style"); ok { - input.RootVolumeSecurityStyle = aws.String(v.(string)) + input.RootVolumeSecurityStyle = awstypes.StorageVirtualMachineRootVolumeSecurityStyle(v.(string)) } if v, ok := d.GetOk("svm_admin_password"); ok { input.SvmAdminPassword = aws.String(v.(string)) } - output, err := conn.CreateStorageVirtualMachineWithContext(ctx, input) + output, err := conn.CreateStorageVirtualMachine(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx ONTAP Storage Virtual Machine (%s): %s", name, err) } - d.SetId(aws.StringValue(output.StorageVirtualMachine.StorageVirtualMachineId)) + d.SetId(aws.ToString(output.StorageVirtualMachine.StorageVirtualMachineId)) if _, err := waitStorageVirtualMachineCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx ONTAP Storage Virtual Machine (%s) create: %s", d.Id(), err) @@ -277,7 +279,7 @@ func resourceONTAPStorageVirtualMachineCreate(ctx context.Context, d *schema.Res func resourceONTAPStorageVirtualMachineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) storageVirtualMachine, err := findStorageVirtualMachineByID(ctx, conn, d.Id()) @@ -314,7 +316,7 @@ func resourceONTAPStorageVirtualMachineRead(ctx context.Context, d *schema.Resou func resourceONTAPStorageVirtualMachineUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &fsx.UpdateStorageVirtualMachineInput{ @@ -330,7 +332,7 @@ func resourceONTAPStorageVirtualMachineUpdate(ctx context.Context, d *schema.Res input.SvmAdminPassword = aws.String(d.Get("svm_admin_password").(string)) } - _, err := conn.UpdateStorageVirtualMachineWithContext(ctx, input) + _, err := conn.UpdateStorageVirtualMachine(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx ONTAP Storage Virtual Machine (%s): %s", d.Id(), err) @@ -346,14 +348,14 @@ func resourceONTAPStorageVirtualMachineUpdate(ctx context.Context, d *schema.Res func resourceONTAPStorageVirtualMachineDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) log.Printf("[DEBUG] Deleting FSx ONTAP Storage Virtual Machine: %s", d.Id()) - _, err := conn.DeleteStorageVirtualMachineWithContext(ctx, &fsx.DeleteStorageVirtualMachineInput{ + _, err := conn.DeleteStorageVirtualMachine(ctx, &fsx.DeleteStorageVirtualMachineInput{ StorageVirtualMachineId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeStorageVirtualMachineNotFound) { + if errs.IsA[*awstypes.StorageVirtualMachineNotFound](err) { return diags } @@ -368,14 +370,142 @@ func 
resourceONTAPStorageVirtualMachineDelete(ctx context.Context, d *schema.Res return diags } -func expandCreateSvmActiveDirectoryConfiguration(cfg []interface{}) *fsx.CreateSvmActiveDirectoryConfiguration { +func findStorageVirtualMachineByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.StorageVirtualMachine, error) { + input := &fsx.DescribeStorageVirtualMachinesInput{ + StorageVirtualMachineIds: []string{id}, + } + + return findStorageVirtualMachine(ctx, conn, input, tfslices.PredicateTrue[*awstypes.StorageVirtualMachine]()) +} + +func findStorageVirtualMachine(ctx context.Context, conn *fsx.Client, input *fsx.DescribeStorageVirtualMachinesInput, filter tfslices.Predicate[*awstypes.StorageVirtualMachine]) (*awstypes.StorageVirtualMachine, error) { + output, err := findStorageVirtualMachines(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findStorageVirtualMachines(ctx context.Context, conn *fsx.Client, input *fsx.DescribeStorageVirtualMachinesInput, filter tfslices.Predicate[*awstypes.StorageVirtualMachine]) ([]awstypes.StorageVirtualMachine, error) { + var output []awstypes.StorageVirtualMachine + + pages := fsx.NewDescribeStorageVirtualMachinesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.StorageVirtualMachineNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.StorageVirtualMachines { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func statusStorageVirtualMachine(ctx context.Context, conn *fsx.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findStorageVirtualMachineByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Lifecycle), nil + } +} + +func waitStorageVirtualMachineCreated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.StorageVirtualMachine, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.StorageVirtualMachineLifecycleCreating, awstypes.StorageVirtualMachineLifecyclePending), + Target: enum.Slice(awstypes.StorageVirtualMachineLifecycleCreated, awstypes.StorageVirtualMachineLifecycleMisconfigured), + Refresh: statusStorageVirtualMachine(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.StorageVirtualMachine); ok { + if reason := output.LifecycleTransitionReason; reason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(reason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitStorageVirtualMachineUpdated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.StorageVirtualMachine, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.StorageVirtualMachineLifecyclePending), + Target: enum.Slice(awstypes.StorageVirtualMachineLifecycleCreated, awstypes.StorageVirtualMachineLifecycleMisconfigured), + Refresh: statusStorageVirtualMachine(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.StorageVirtualMachine); ok { + 
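+ // Attach the API-reported lifecycle transition reason to the waiter error, so a stuck SVM surfaces a useful diagnostic instead of a bare timeout.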
if reason := output.LifecycleTransitionReason; reason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(reason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitStorageVirtualMachineDeleted(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.StorageVirtualMachine, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.StorageVirtualMachineLifecycleCreated, awstypes.StorageVirtualMachineLifecycleDeleting), + Target: []string{}, + Refresh: statusStorageVirtualMachine(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.StorageVirtualMachine); ok { + if reason := output.LifecycleTransitionReason; reason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(reason.Message))) + } + + return output, err + } + + return nil, err +} + +func expandCreateSvmActiveDirectoryConfiguration(cfg []interface{}) *awstypes.CreateSvmActiveDirectoryConfiguration { if len(cfg) < 1 { return nil } conf := cfg[0].(map[string]interface{}) - out := fsx.CreateSvmActiveDirectoryConfiguration{} + out := awstypes.CreateSvmActiveDirectoryConfiguration{} if v, ok := conf["netbios_name"].(string); ok && len(v) > 0 { out.NetBiosName = aws.String(v) @@ -388,17 +518,17 @@ func expandCreateSvmActiveDirectoryConfiguration(cfg []interface{}) *fsx.CreateS return &out } -func expandSelfManagedActiveDirectoryConfiguration(cfg []interface{}) *fsx.SelfManagedActiveDirectoryConfiguration { +func expandSelfManagedActiveDirectoryConfiguration(cfg []interface{}) *awstypes.SelfManagedActiveDirectoryConfiguration { if len(cfg) < 1 { return nil } conf := cfg[0].(map[string]interface{}) - out := fsx.SelfManagedActiveDirectoryConfiguration{} + out := awstypes.SelfManagedActiveDirectoryConfiguration{} if v, ok := conf["dns_ips"].(*schema.Set); ok { - out.DnsIps = flex.ExpandStringSet(v) + out.DnsIps = flex.ExpandStringValueSet(v) } if v, ok := conf[names.AttrDomainName].(string); ok && len(v) > 0 { @@ -424,14 +554,14 @@ func expandSelfManagedActiveDirectoryConfiguration(cfg []interface{}) *fsx.SelfM return &out } -func expandUpdateSvmActiveDirectoryConfiguration(cfg []interface{}) *fsx.UpdateSvmActiveDirectoryConfiguration { +func expandUpdateSvmActiveDirectoryConfiguration(cfg []interface{}) *awstypes.UpdateSvmActiveDirectoryConfiguration { if len(cfg) < 1 { return nil } conf := cfg[0].(map[string]interface{}) - out := fsx.UpdateSvmActiveDirectoryConfiguration{} + out := awstypes.UpdateSvmActiveDirectoryConfiguration{} if v, ok := conf["netbios_name"].(string); ok && len(v) > 0 { out.NetBiosName = aws.String(v) @@ -444,17 +574,17 @@ func expandUpdateSvmActiveDirectoryConfiguration(cfg []interface{}) *fsx.UpdateS return &out } -func expandSelfManagedActiveDirectoryConfigurationUpdates(cfg []interface{}) *fsx.SelfManagedActiveDirectoryConfigurationUpdates { +func expandSelfManagedActiveDirectoryConfigurationUpdates(cfg []interface{}) *awstypes.SelfManagedActiveDirectoryConfigurationUpdates { if len(cfg) < 1 { return nil } conf := cfg[0].(map[string]interface{}) - out := fsx.SelfManagedActiveDirectoryConfigurationUpdates{} + out := awstypes.SelfManagedActiveDirectoryConfigurationUpdates{} if v, ok := conf["dns_ips"].(*schema.Set); ok { - out.DnsIps = flex.ExpandStringSet(v) + out.DnsIps = flex.ExpandStringValueSet(v) } if v, ok := conf[names.AttrDomainName].(string); ok && len(v) > 0 { @@ -480,7 +610,7 @@ func 
expandSelfManagedActiveDirectoryConfigurationUpdates(cfg []interface{}) *fs return &out } -func flattenSvmActiveDirectoryConfiguration(d *schema.ResourceData, rs *fsx.SvmActiveDirectoryConfiguration) []interface{} { +func flattenSvmActiveDirectoryConfiguration(d *schema.ResourceData, rs *awstypes.SvmActiveDirectoryConfiguration) []interface{} { if rs == nil { return []interface{}{} } @@ -497,28 +627,28 @@ func flattenSvmActiveDirectoryConfiguration(d *schema.ResourceData, rs *fsx.SvmA return []interface{}{m} } -func flattenSelfManagedActiveDirectoryAttributes(d *schema.ResourceData, rs *fsx.SelfManagedActiveDirectoryAttributes) []interface{} { +func flattenSelfManagedActiveDirectoryAttributes(d *schema.ResourceData, rs *awstypes.SelfManagedActiveDirectoryAttributes) []interface{} { if rs == nil { return []interface{}{} } m := make(map[string]interface{}) if rs.DnsIps != nil { - m["dns_ips"] = aws.StringValueSlice(rs.DnsIps) + m["dns_ips"] = rs.DnsIps } if rs.DomainName != nil { - m[names.AttrDomainName] = aws.StringValue(rs.DomainName) + m[names.AttrDomainName] = aws.ToString(rs.DomainName) } if rs.OrganizationalUnitDistinguishedName != nil { if _, ok := d.GetOk("active_directory_configuration.0.self_managed_active_directory_configuration.0.organizational_unit_distinguished_name"); ok { - m["organizational_unit_distinguished_name"] = aws.StringValue(rs.OrganizationalUnitDistinguishedName) + m["organizational_unit_distinguished_name"] = aws.ToString(rs.OrganizationalUnitDistinguishedName) } } if rs.UserName != nil { - m[names.AttrUsername] = aws.StringValue(rs.UserName) + m[names.AttrUsername] = aws.ToString(rs.UserName) } // Since we are in a configuration block and the FSx API does not return @@ -536,7 +666,7 @@ func flattenSelfManagedActiveDirectoryAttributes(d *schema.ResourceData, rs *fsx return []interface{}{m} } -func flattenSvmEndpoints(rs *fsx.SvmEndpoints) []interface{} { +func flattenSvmEndpoints(rs *awstypes.SvmEndpoints) []interface{} { if rs == nil { return []interface{}{} } @@ -557,149 +687,18 @@ func flattenSvmEndpoints(rs *fsx.SvmEndpoints) []interface{} { return []interface{}{m} } -func flattenSvmEndpoint(rs *fsx.SvmEndpoint) []interface{} { +func flattenSvmEndpoint(rs *awstypes.SvmEndpoint) []interface{} { if rs == nil { return []interface{}{} } m := make(map[string]interface{}) if rs.DNSName != nil { - m[names.AttrDNSName] = aws.StringValue(rs.DNSName) + m[names.AttrDNSName] = aws.ToString(rs.DNSName) } if rs.IpAddresses != nil { - m[names.AttrIPAddresses] = flex.FlattenStringSet(rs.IpAddresses) + m[names.AttrIPAddresses] = flex.FlattenStringValueSet(rs.IpAddresses) } return []interface{}{m} } - -func findStorageVirtualMachineByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.StorageVirtualMachine, error) { - input := &fsx.DescribeStorageVirtualMachinesInput{ - StorageVirtualMachineIds: []*string{aws.String(id)}, - } - - return findStorageVirtualMachine(ctx, conn, input, tfslices.PredicateTrue[*fsx.StorageVirtualMachine]()) -} - -func findStorageVirtualMachine(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeStorageVirtualMachinesInput, filter tfslices.Predicate[*fsx.StorageVirtualMachine]) (*fsx.StorageVirtualMachine, error) { - output, err := findStorageVirtualMachines(ctx, conn, input, filter) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - -func findStorageVirtualMachines(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeStorageVirtualMachinesInput, filter 
tfslices.Predicate[*fsx.StorageVirtualMachine]) ([]*fsx.StorageVirtualMachine, error) { - var output []*fsx.StorageVirtualMachine - - err := conn.DescribeStorageVirtualMachinesPagesWithContext(ctx, input, func(page *fsx.DescribeStorageVirtualMachinesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.StorageVirtualMachines { - if v != nil && filter(v) { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeStorageVirtualMachineNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - return output, nil -} - -func statusStorageVirtualMachine(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := findStorageVirtualMachineByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Lifecycle), nil - } -} - -func waitStorageVirtualMachineCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.StorageVirtualMachine, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.StorageVirtualMachineLifecycleCreating, fsx.StorageVirtualMachineLifecyclePending}, - Target: []string{fsx.StorageVirtualMachineLifecycleCreated, fsx.StorageVirtualMachineLifecycleMisconfigured}, - Refresh: statusStorageVirtualMachine(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.StorageVirtualMachine); ok { - if reason := output.LifecycleTransitionReason; reason != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitStorageVirtualMachineUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.StorageVirtualMachine, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.StorageVirtualMachineLifecyclePending}, - Target: []string{fsx.StorageVirtualMachineLifecycleCreated, fsx.StorageVirtualMachineLifecycleMisconfigured}, - Refresh: statusStorageVirtualMachine(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.StorageVirtualMachine); ok { - if reason := output.LifecycleTransitionReason; reason != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitStorageVirtualMachineDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.StorageVirtualMachine, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.StorageVirtualMachineLifecycleCreated, fsx.StorageVirtualMachineLifecycleDeleting}, - Target: []string{}, - Refresh: statusStorageVirtualMachine(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.StorageVirtualMachine); ok { - if reason := output.LifecycleTransitionReason; reason != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) - } - - return output, err - } - - return nil, err -} diff --git a/internal/service/fsx/ontap_storage_virtual_machine_data_source.go 
b/internal/service/fsx/ontap_storage_virtual_machine_data_source.go index d0d0e4a2b1c2..aedd8bd36452 100644 --- a/internal/service/fsx/ontap_storage_virtual_machine_data_source.go +++ b/internal/service/fsx/ontap_storage_virtual_machine_data_source.go @@ -7,8 +7,9 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -19,7 +20,6 @@ import ( ) // @SDKDataSource("aws_fsx_ontap_storage_virtual_machine", name="ONTAP Storage Virtual Machine") -// @Tags func dataSourceONTAPStorageVirtualMachine() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceONTAPStorageVirtualMachineRead, @@ -195,14 +195,13 @@ func dataSourceONTAPStorageVirtualMachine() *schema.Resource { func dataSourceONTAPStorageVirtualMachineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + conn := meta.(*conns.AWSClient).FSxClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig input := &fsx.DescribeStorageVirtualMachinesInput{} if v, ok := d.GetOk(names.AttrID); ok { - input.StorageVirtualMachineIds = aws.StringSlice([]string{v.(string)}) + input.StorageVirtualMachineIds = []string{v.(string)} } input.Filters = newStorageVirtualMachineFilterList( @@ -213,17 +212,18 @@ func dataSourceONTAPStorageVirtualMachineRead(ctx context.Context, d *schema.Res input.Filters = nil } - svm, err := findStorageVirtualMachine(ctx, conn, input, tfslices.PredicateTrue[*fsx.StorageVirtualMachine]()) + svm, err := findStorageVirtualMachine(ctx, conn, input, tfslices.PredicateTrue[*awstypes.StorageVirtualMachine]()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading FSx ONTAP Storage Virtual Machine: %s", err) } - d.SetId(aws.StringValue(svm.StorageVirtualMachineId)) + d.SetId(aws.ToString(svm.StorageVirtualMachineId)) if err := d.Set("active_directory_configuration", flattenSvmActiveDirectoryConfiguration(d, svm.ActiveDirectoryConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting active_directory_configuration: %s", err) } - d.Set(names.AttrARN, svm.ResourceARN) + arn := aws.ToString(svm.ResourceARN) + d.Set(names.AttrARN, arn) d.Set(names.AttrCreationTime, svm.CreationTime.Format(time.RFC3339)) if err := d.Set(names.AttrEndpoints, flattenSvmEndpoints(svm.Endpoints)); err != nil { return sdkdiag.AppendErrorf(diags, "setting endpoints: %s", err) @@ -240,17 +240,20 @@ func dataSourceONTAPStorageVirtualMachineRead(ctx context.Context, d *schema.Res // SVM tags aren't set in the Describe response. 
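The change that follows is the substantive fix in this data source: as the comment above notes, the Describe response carries no tags, so the old KeyValueTags(ctx, svm.Tags) flattening always produced an empty map, and the Read now lists tags by the resource ARN instead (captured as arn earlier in this hunk). Below is a minimal sketch of the underlying AWS SDK for Go v2 call that the provider's generated listTags helper wraps; the listFSxTags wrapper is hypothetical, and the paginator name assumes the SDK's generated convention, while fsx.ListTagsForResource itself is the real API.

package fsxexample // illustrative placement; in the provider this helper is code-generated per service

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
)

// listFSxTags pages through ListTagsForResource and returns every tag on an
// FSx resource as a plain map. Hypothetical helper for illustration only.
func listFSxTags(ctx context.Context, conn *fsx.Client, arn string) (map[string]string, error) {
	tags := make(map[string]string)

	pages := fsx.NewListTagsForResourcePaginator(conn, &fsx.ListTagsForResourceInput{
		ResourceARN: aws.String(arn),
	})
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, tag := range page.Tags {
			tags[aws.ToString(tag.Key)] = aws.ToString(tag.Value)
		}
	}

	return tags, nil
}

The provider's generated listTags for FSx does essentially this but returns tftags.KeyValueTags, which is why the hunk below can chain IgnoreAWS().IgnoreConfig(...) directly on its result.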
// setTagsOut(ctx, svm.Tags) - tags := KeyValueTags(ctx, svm.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + tags, err := listTags(ctx, conn, arn) - //lintignore:AWSR002 - if err := d.Set(names.AttrTags, tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing tags for ONTAP Storage Virtual Machine (%s): %s", arn, err) + } + + if err := d.Set(names.AttrTags, tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) } return diags } -func flattenLifecycleTransitionReason(rs *fsx.LifecycleTransitionReason) []interface{} { +func flattenLifecycleTransitionReason(rs *awstypes.LifecycleTransitionReason) []interface{} { if rs == nil { return []interface{}{} } @@ -258,7 +261,7 @@ func flattenLifecycleTransitionReason(rs *fsx.LifecycleTransitionReason) []inter m := make(map[string]interface{}) if rs.Message != nil { - m[names.AttrMessage] = aws.StringValue(rs.Message) + m[names.AttrMessage] = aws.ToString(rs.Message) } return []interface{}{m} diff --git a/internal/service/fsx/ontap_storage_virtual_machine_data_source_test.go b/internal/service/fsx/ontap_storage_virtual_machine_data_source_test.go index 22d6354d6ef2..a9002e88ae89 100644 --- a/internal/service/fsx/ontap_storage_virtual_machine_data_source_test.go +++ b/internal/service/fsx/ontap_storage_virtual_machine_data_source_test.go @@ -4,6 +4,7 @@ package fsx_test import ( + "fmt" "testing" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -81,15 +82,33 @@ func TestAccFSxONTAPStorageVirtualMachineDataSource_Filter(t *testing.T) { } func testAccONTAPStorageVirtualMachineDataSourceConfig_Id(rName string) string { - return acctest.ConfigCompose(testAccONTAPStorageVirtualMachineConfig_basic(rName), ` + return acctest.ConfigCompose(testAccONTAPStorageVirtualMachineConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_storage_virtual_machine" "test" { + file_system_id = aws_fsx_ontap_file_system.test.id + name = %[1]q + + tags = { + Name = %[1]q + } +} + data "aws_fsx_ontap_storage_virtual_machine" "test" { id = aws_fsx_ontap_storage_virtual_machine.test.id } -`) +`, rName)) } func testAccONTAPStorageVirtualMachineDataSourceConfig_Filter(rName string) string { - return acctest.ConfigCompose(testAccONTAPStorageVirtualMachineConfig_basic(rName), ` + return acctest.ConfigCompose(testAccONTAPStorageVirtualMachineConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_storage_virtual_machine" "test" { + file_system_id = aws_fsx_ontap_file_system.test.id + name = %[1]q + + tags = { + Name = %[1]q + } +} + data "aws_fsx_ontap_storage_virtual_machine" "test" { filter { name = "file-system-id" @@ -98,5 +117,5 @@ data "aws_fsx_ontap_storage_virtual_machine" "test" { depends_on = [aws_fsx_ontap_storage_virtual_machine.test] } -`) +`, rName)) } diff --git a/internal/service/fsx/ontap_storage_virtual_machine_migrate.go b/internal/service/fsx/ontap_storage_virtual_machine_migrate.go index 22c16c3d75b1..44d9634bd80b 100644 --- a/internal/service/fsx/ontap_storage_virtual_machine_migrate.go +++ b/internal/service/fsx/ontap_storage_virtual_machine_migrate.go @@ -8,9 +8,10 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/enum" 
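A note on the tag fix above: DescribeStorageVirtualMachines leaves Tags unset, so the data source now resolves them through ListTagsForResource using the SVM's ARN. Below is a minimal standalone sketch of that lookup with the SDK v2 client; the helper name listFSxTags and the example ARN are illustrative, and the provider's generated listTags helper adds retry and ignore-config handling not shown here.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
)

// listFSxTags (hypothetical helper) pages through ListTagsForResource via
// NextToken and flattens the result into a plain map.
func listFSxTags(ctx context.Context, client *fsx.Client, arn string) (map[string]string, error) {
	tags := make(map[string]string)
	input := &fsx.ListTagsForResourceInput{ResourceARN: aws.String(arn)}

	for {
		out, err := client.ListTagsForResource(ctx, input)
		if err != nil {
			return nil, err
		}
		for _, t := range out.Tags {
			tags[aws.ToString(t.Key)] = aws.ToString(t.Value)
		}
		if aws.ToString(out.NextToken) == "" {
			return tags, nil
		}
		input.NextToken = out.NextToken
	}
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	// Placeholder ARN; substitute the ResourceARN returned for a real SVM.
	tags, err := listFSxTags(ctx, fsx.NewFromConfig(cfg),
		"arn:aws:fsx:us-east-1:123456789012:storage-virtual-machine/fs-0123456789abcdef0/svm-0123456789abcdef0")
	fmt.Println(tags, err)
}
```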
tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -185,10 +186,10 @@ func resourceONTAPStorageVirtualMachineV0() *schema.Resource { ValidateFunc: validation.StringLenBetween(1, 47), }, "root_volume_security_style": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(fsx.StorageVirtualMachineRootVolumeSecurityStyle_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.StorageVirtualMachineRootVolumeSecurityStyle](), }, "subtype": { Type: schema.TypeString, diff --git a/internal/service/fsx/ontap_storage_virtual_machine_test.go b/internal/service/fsx/ontap_storage_virtual_machine_test.go index e35dc99ecb51..3c3cd6cba9df 100644 --- a/internal/service/fsx/ontap_storage_virtual_machine_test.go +++ b/internal/service/fsx/ontap_storage_virtual_machine_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -24,12 +24,12 @@ import ( func TestAccFSxONTAPStorageVirtualMachine_basic(t *testing.T) { ctx := acctest.Context(t) - var storageVirtualMachine fsx.StorageVirtualMachine + var storageVirtualMachine awstypes.StorageVirtualMachine resourceName := "aws_fsx_ontap_storage_virtual_machine.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPStorageVirtualMachineDestroy(ctx), @@ -48,7 +48,7 @@ func TestAccFSxONTAPStorageVirtualMachine_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "endpoints.0.nfs.0.dns_name"), resource.TestCheckResourceAttrSet(resourceName, names.AttrFileSystemID), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, "subtype", fsx.StorageVirtualMachineSubtypeDefault), + resource.TestCheckResourceAttr(resourceName, "subtype", string(awstypes.StorageVirtualMachineSubtypeDefault)), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), resource.TestCheckResourceAttrSet(resourceName, "uuid"), ), @@ -64,12 +64,12 @@ func TestAccFSxONTAPStorageVirtualMachine_basic(t *testing.T) { func TestAccFSxONTAPStorageVirtualMachine_rootVolumeSecurityStyle(t *testing.T) { ctx := acctest.Context(t) - var storageVirtualMachine fsx.StorageVirtualMachine + var storageVirtualMachine awstypes.StorageVirtualMachine resourceName := "aws_fsx_ontap_storage_virtual_machine.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: 
acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPStorageVirtualMachineDestroy(ctx), @@ -102,14 +102,14 @@ func TestAccFSxONTAPStorageVirtualMachine_rootVolumeSecurityStyle(t *testing.T) func TestAccFSxONTAPStorageVirtualMachine_svmAdminPassword(t *testing.T) { ctx := acctest.Context(t) - var storageVirtualMachine1, storageVirtualMachine2 fsx.StorageVirtualMachine + var storageVirtualMachine1, storageVirtualMachine2 awstypes.StorageVirtualMachine resourceName := "aws_fsx_ontap_storage_virtual_machine.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) pass1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) pass2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPStorageVirtualMachineDestroy(ctx), @@ -141,12 +141,12 @@ func TestAccFSxONTAPStorageVirtualMachine_svmAdminPassword(t *testing.T) { func TestAccFSxONTAPStorageVirtualMachine_disappears(t *testing.T) { ctx := acctest.Context(t) - var storageVirtualMachine fsx.StorageVirtualMachine + var storageVirtualMachine awstypes.StorageVirtualMachine resourceName := "aws_fsx_ontap_storage_virtual_machine.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPStorageVirtualMachineDestroy(ctx), @@ -165,13 +165,13 @@ func TestAccFSxONTAPStorageVirtualMachine_disappears(t *testing.T) { func TestAccFSxONTAPStorageVirtualMachine_name(t *testing.T) { ctx := acctest.Context(t) - var storageVirtualMachine1, storageVirtualMachine2 fsx.StorageVirtualMachine + var storageVirtualMachine1, storageVirtualMachine2 awstypes.StorageVirtualMachine resourceName := "aws_fsx_ontap_storage_virtual_machine.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPStorageVirtualMachineDestroy(ctx), @@ -202,12 +202,12 @@ func TestAccFSxONTAPStorageVirtualMachine_name(t *testing.T) { func TestAccFSxONTAPStorageVirtualMachine_tags(t *testing.T) { ctx := acctest.Context(t) - var storageVirtualMachine1, storageVirtualMachine2, storageVirtualMachine3 fsx.StorageVirtualMachine + var storageVirtualMachine1, storageVirtualMachine2, storageVirtualMachine3 awstypes.StorageVirtualMachine resourceName := "aws_fsx_ontap_storage_virtual_machine.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPStorageVirtualMachineDestroy(ctx), @@ -250,7 +250,7 @@ func TestAccFSxONTAPStorageVirtualMachine_tags(t *testing.T) { func TestAccFSxONTAPStorageVirtualMachine_activeDirectoryCreate(t *testing.T) { ctx := acctest.Context(t) - var storageVirtualMachine fsx.StorageVirtualMachine + var storageVirtualMachine awstypes.StorageVirtualMachine resourceName := "aws_fsx_ontap_storage_virtual_machine.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) netBiosName := "tftest-" + sdkacctest.RandString(7) @@ -259,7 +259,7 @@ func TestAccFSxONTAPStorageVirtualMachine_activeDirectoryCreate(t *testing.T) { domainPassword := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPStorageVirtualMachineDestroy(ctx), @@ -293,7 +293,7 @@ func TestAccFSxONTAPStorageVirtualMachine_activeDirectoryCreate(t *testing.T) { func TestAccFSxONTAPStorageVirtualMachine_activeDirectoryJoin(t *testing.T) { ctx := acctest.Context(t) - var storageVirtualMachine1, storageVirtualMachine2 fsx.StorageVirtualMachine + var storageVirtualMachine1, storageVirtualMachine2 awstypes.StorageVirtualMachine resourceName := "aws_fsx_ontap_storage_virtual_machine.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) netBiosName := "tftest-" + sdkacctest.RandString(7) @@ -302,7 +302,7 @@ func TestAccFSxONTAPStorageVirtualMachine_activeDirectoryJoin(t *testing.T) { domainPassword := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPStorageVirtualMachineDestroy(ctx), @@ -332,14 +332,14 @@ func TestAccFSxONTAPStorageVirtualMachine_activeDirectoryJoin(t *testing.T) { }) } -func testAccCheckONTAPStorageVirtualMachineExists(ctx context.Context, n string, v *fsx.StorageVirtualMachine) resource.TestCheckFunc { +func testAccCheckONTAPStorageVirtualMachineExists(ctx context.Context, n string, v *awstypes.StorageVirtualMachine) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) output, err := tffsx.FindStorageVirtualMachineByID(ctx, conn, rs.Primary.ID) @@ -355,7 +355,7 @@ func testAccCheckONTAPStorageVirtualMachineExists(ctx context.Context, n string, 
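The test changes above also reflect a type-system shift: SDK v2 models service enums as named string types rather than v1's plain string constants, which is why comparisons now read string(awstypes.StorageVirtualMachineSubtypeDefault) and why helpers like the provider's enum.Validate can enumerate members generically. A small self-contained illustration (printed members assume the current FSx API model):

```go
package main

import (
	"fmt"

	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

func main() {
	// v2 enums are named string types; Terraform schema attributes store plain
	// strings, hence conversions like the one in the subtype check above.
	fmt.Println(string(awstypes.StorageVirtualMachineSubtypeDefault)) // DEFAULT

	// Every generated enum carries a Values() method listing its members,
	// which is what helpers like the provider's enum.Validate build on.
	for _, v := range awstypes.StorageVirtualMachineRootVolumeSecurityStyle("").Values() {
		fmt.Println(v)
	}
}
```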
func testAccCheckONTAPStorageVirtualMachineDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_fsx_ontap_storage_virtual_machine" { @@ -375,20 +375,20 @@ func testAccCheckONTAPStorageVirtualMachineDestroy(ctx context.Context) resource } } -func testAccCheckONTAPStorageVirtualMachineNotRecreated(i, j *fsx.StorageVirtualMachine) resource.TestCheckFunc { +func testAccCheckONTAPStorageVirtualMachineNotRecreated(i, j *awstypes.StorageVirtualMachine) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.StorageVirtualMachineId) != aws.StringValue(j.StorageVirtualMachineId) { - return fmt.Errorf("FSx ONTAP Storage Virtual Machine (%s) recreated", aws.StringValue(i.StorageVirtualMachineId)) + if aws.ToString(i.StorageVirtualMachineId) != aws.ToString(j.StorageVirtualMachineId) { + return fmt.Errorf("FSx ONTAP Storage Virtual Machine (%s) recreated", aws.ToString(i.StorageVirtualMachineId)) } return nil } } -func testAccCheckONTAPStorageVirtualMachineRecreated(i, j *fsx.StorageVirtualMachine) resource.TestCheckFunc { +func testAccCheckONTAPStorageVirtualMachineRecreated(i, j *awstypes.StorageVirtualMachine) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.StorageVirtualMachineId) == aws.StringValue(j.StorageVirtualMachineId) { - return fmt.Errorf("FSx ONTAP Storage Virtual Machine (%s) not recreated", aws.StringValue(i.StorageVirtualMachineId)) + if aws.ToString(i.StorageVirtualMachineId) == aws.ToString(j.StorageVirtualMachineId) { + return fmt.Errorf("FSx ONTAP Storage Virtual Machine (%s) not recreated", aws.ToString(i.StorageVirtualMachineId)) } return nil diff --git a/internal/service/fsx/ontap_storage_virtual_machines_data_source.go b/internal/service/fsx/ontap_storage_virtual_machines_data_source.go index cf4c9444f614..9951b0351faa 100644 --- a/internal/service/fsx/ontap_storage_virtual_machines_data_source.go +++ b/internal/service/fsx/ontap_storage_virtual_machines_data_source.go @@ -6,8 +6,9 @@ package fsx import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -34,7 +35,7 @@ func dataSourceONTAPStorageVirtualMachines() *schema.Resource { func dataSourceONTAPStorageVirtualMachinesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) input := &fsx.DescribeStorageVirtualMachinesInput{} @@ -46,15 +47,15 @@ func dataSourceONTAPStorageVirtualMachinesRead(ctx context.Context, d *schema.Re input.Filters = nil } - svms, err := findStorageVirtualMachines(ctx, conn, input, tfslices.PredicateTrue[*fsx.StorageVirtualMachine]()) + svms, err := findStorageVirtualMachines(ctx, conn, input, tfslices.PredicateTrue[*awstypes.StorageVirtualMachine]()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading FSx ONTAP Storage Virtual Machines: %s", err) } d.SetId(meta.(*conns.AWSClient).Region) -
d.Set(names.AttrIDs, tfslices.ApplyToAll(svms, func(svm *fsx.StorageVirtualMachine) string { - return aws.StringValue(svm.StorageVirtualMachineId) + d.Set(names.AttrIDs, tfslices.ApplyToAll(svms, func(svm awstypes.StorageVirtualMachine) string { + return aws.ToString(svm.StorageVirtualMachineId) })) return diags diff --git a/internal/service/fsx/ontap_volume.go b/internal/service/fsx/ontap_volume.go index dda4d21d64c5..9c54a82ea28e 100644 --- a/internal/service/fsx/ontap_volume.go +++ b/internal/service/fsx/ontap_volume.go @@ -11,15 +11,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" @@ -122,17 +124,17 @@ func resourceONTAPVolume() *schema.Resource { ValidateFunc: validation.StringLenBetween(1, 203), }, "ontap_volume_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(fsx.InputOntapVolumeType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.InputOntapVolumeType](), }, "security_style": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(fsx.StorageVirtualMachineRootVolumeSecurityStyle_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.StorageVirtualMachineRootVolumeSecurityStyle](), }, "size_in_bytes": { Type: nullable.TypeNullableInt, @@ -173,10 +175,10 @@ func resourceONTAPVolume() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrType: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(fsx.AutocommitPeriodType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.AutocommitPeriodType](), }, names.AttrValue: { Type: schema.TypeInt, @@ -187,10 +189,10 @@ func resourceONTAPVolume() *schema.Resource { }, }, "privileged_delete": { - Type: schema.TypeString, - Optional: true, - Default: fsx.PrivilegedDeleteDisabled, - ValidateFunc: validation.StringInSlice(fsx.PrivilegedDelete_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.PrivilegedDeleteDisabled, + ValidateDiagFunc: enum.Validate[awstypes.PrivilegedDelete](), }, names.AttrRetentionPeriod: { Type: schema.TypeList, @@ -208,10 +210,10 @@ func resourceONTAPVolume() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrType: { - Type: schema.TypeString, - Optional: 
true, - Computed: true, - ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.RetentionPeriodType](), }, names.AttrValue: { Type: schema.TypeInt, @@ -229,10 +231,10 @@ func resourceONTAPVolume() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrType: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.RetentionPeriodType](), }, names.AttrValue: { Type: schema.TypeInt, @@ -250,10 +252,10 @@ func resourceONTAPVolume() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrType: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(fsx.RetentionPeriodType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.RetentionPeriodType](), }, names.AttrValue: { Type: schema.TypeInt, @@ -267,10 +269,10 @@ func resourceONTAPVolume() *schema.Resource { }, }, "snaplock_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(fsx.SnaplockType_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.SnaplockType](), }, "volume_append_mode_enabled": { Type: schema.TypeBool, @@ -310,10 +312,10 @@ func resourceONTAPVolume() *schema.Resource { ValidateFunc: validation.IntBetween(2, 183), }, names.AttrName: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(fsx.TieringPolicyName_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.TieringPolicyName](), }, }, }, @@ -325,18 +327,18 @@ func resourceONTAPVolume() *schema.Resource { Computed: true, }, "volume_style": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(fsx.VolumeStyle_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.VolumeStyle](), }, names.AttrVolumeType: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: fsx.VolumeTypeOntap, - ValidateFunc: validation.StringInSlice(fsx.VolumeType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.VolumeTypeOntap, + ValidateDiagFunc: enum.Validate[awstypes.VolumeType](), }, }, CustomizeDiff: verify.SetTagsDiff, @@ -345,9 +347,9 @@ func resourceONTAPVolume() *schema.Resource { func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) - ontapConfig := &fsx.CreateOntapVolumeConfiguration{ + ontapConfig := &awstypes.CreateOntapVolumeConfiguration{ StorageVirtualMachineId: aws.String(d.Get("storage_virtual_machine_id").(string)), } @@ -364,11 +366,11 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("ontap_volume_type"); ok { - ontapConfig.OntapVolumeType = aws.String(v.(string)) + 
ontapConfig.OntapVolumeType = awstypes.InputOntapVolumeType(v.(string)) } if v, ok := d.GetOk("security_style"); ok { - ontapConfig.SecurityStyle = aws.String(v.(string)) + ontapConfig.SecurityStyle = awstypes.SecurityStyle(v.(string)) } if v, null, _ := nullable.Int(d.Get("size_in_bytes").(string)).ValueInt64(); !null && v > 0 { @@ -376,7 +378,7 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("size_in_megabytes"); ok { - ontapConfig.SizeInMegabytes = aws.Int64(int64(v.(int))) + ontapConfig.SizeInMegabytes = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("snaplock_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -396,7 +398,7 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("volume_style"); ok { - ontapConfig.VolumeStyle = aws.String(v.(string)) + ontapConfig.VolumeStyle = awstypes.VolumeStyle(v.(string)) } name := d.Get(names.AttrName).(string) @@ -404,16 +406,16 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta Name: aws.String(name), OntapConfiguration: ontapConfig, Tags: getTagsIn(ctx), - VolumeType: aws.String(d.Get(names.AttrVolumeType).(string)), + VolumeType: awstypes.VolumeType(d.Get(names.AttrVolumeType).(string)), } - output, err := conn.CreateVolumeWithContext(ctx, input) + output, err := conn.CreateVolume(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx for NetApp ONTAP Volume (%s): %s", name, err) } - d.SetId(aws.StringValue(output.Volume.VolumeId)) + d.SetId(aws.ToString(output.Volume.VolumeId)) if _, err := waitVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) create: %s", d.Id(), err) @@ -424,7 +426,7 @@ func resourceONTAPVolumeCreate(ctx context.Context, d *schema.ResourceData, meta func resourceONTAPVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) volume, err := findONTAPVolumeByID(ctx, conn, d.Id()) @@ -485,7 +487,7 @@ func resourceONTAPVolumeRead(ctx context.Context, d *schema.ResourceData, meta i func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChangesExcept( "final_backup_tags", @@ -493,7 +495,7 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta names.AttrTags, names.AttrTagsAll, ) { - ontapConfig := &fsx.UpdateOntapVolumeConfiguration{} + ontapConfig := &awstypes.UpdateOntapVolumeConfiguration{} if d.HasChange("copy_tags_to_backups") { ontapConfig.CopyTagsToBackups = aws.Bool(d.Get("copy_tags_to_backups").(bool)) @@ -504,7 +506,7 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange("security_style") { - ontapConfig.SecurityStyle = aws.String(d.Get("security_style").(string)) + ontapConfig.SecurityStyle = awstypes.SecurityStyle(d.Get("security_style").(string)) } if d.HasChange("size_in_bytes") { @@ -514,7 +516,7 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange("size_in_megabytes") { - ontapConfig.SizeInMegabytes = 
aws.Int64(int64(d.Get("size_in_megabytes").(int))) + ontapConfig.SizeInMegabytes = aws.Int32(int32(d.Get("size_in_megabytes").(int))) } if d.HasChange("snaplock_configuration") { @@ -544,7 +546,7 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta } startTime := time.Now() - _, err := conn.UpdateVolumeWithContext(ctx, input) + _, err := conn.UpdateVolume(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for NetApp ONTAP Volume (%s): %s", d.Id(), err) @@ -554,8 +556,8 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) update: %s", d.Id(), err) } - if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) + if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for NetApp ONTAP Volume (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeVolumeUpdate, err) } } @@ -564,10 +566,10 @@ func resourceONTAPVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) input := &fsx.DeleteVolumeInput{ - OntapConfiguration: &fsx.DeleteVolumeOntapConfiguration{ + OntapConfiguration: &awstypes.DeleteVolumeOntapConfiguration{ BypassSnaplockEnterpriseRetention: aws.Bool(d.Get("bypass_snaplock_enterprise_retention").(bool)), SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), }, @@ -579,9 +581,9 @@ func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta } log.Printf("[DEBUG] Deleting FSx for NetApp ONTAP Volume: %s", d.Id()) - _, err := conn.DeleteVolumeWithContext(ctx, input) + _, err := conn.DeleteVolume(ctx, input) - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { + if errs.IsA[*awstypes.VolumeNotFound](err) { return diags } @@ -596,48 +598,272 @@ func resourceONTAPVolumeDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func expandAggregateConfiguration(tfMap map[string]interface{}) *fsx.CreateAggregateConfiguration { +func findONTAPVolumeByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.Volume, error) { + output, err := findVolumeByIDAndType(ctx, conn, id, awstypes.VolumeTypeOntap) + + if err != nil { + return nil, err + } + + if output.OntapConfiguration == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output, nil +} + +func findVolumeByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.Volume, error) { + input := &fsx.DescribeVolumesInput{ + VolumeIds: []string{id}, + } + + return findVolume(ctx, conn, input, tfslices.PredicateTrue[*awstypes.Volume]()) +} + +func findVolumeByIDAndType(ctx context.Context, conn *fsx.Client, volID string, volType awstypes.VolumeType) (*awstypes.Volume, error) { + input := &fsx.DescribeVolumesInput{ + VolumeIds: []string{volID}, + } + filter := func(v *awstypes.Volume) bool 
{ + return v.VolumeType == volType + } + + return findVolume(ctx, conn, input, filter) +} + +func findVolume(ctx context.Context, conn *fsx.Client, input *fsx.DescribeVolumesInput, filter tfslices.Predicate[*awstypes.Volume]) (*awstypes.Volume, error) { + output, err := findVolumes(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findVolumes(ctx context.Context, conn *fsx.Client, input *fsx.DescribeVolumesInput, filter tfslices.Predicate[*awstypes.Volume]) ([]awstypes.Volume, error) { + var output []awstypes.Volume + + pages := fsx.NewDescribeVolumesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.VolumeNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.Volumes { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func statusVolume(ctx context.Context, conn *fsx.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findVolumeByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Lifecycle), nil + } +} + +func waitVolumeCreated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.VolumeLifecycleCreating, awstypes.VolumeLifecyclePending), + Target: enum.Slice(awstypes.VolumeLifecycleCreated, awstypes.VolumeLifecycleMisconfigured, awstypes.VolumeLifecycleAvailable), + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Volume); ok { + if output.Lifecycle == awstypes.VolumeLifecycleFailed && output.LifecycleTransitionReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(output.LifecycleTransitionReason.Message))) + } + + return output, err + } + + return nil, err +} + +func waitVolumeUpdated(ctx context.Context, conn *fsx.Client, id string, startTime time.Time, timeout time.Duration) (*awstypes.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.VolumeLifecyclePending), + Target: enum.Slice(awstypes.VolumeLifecycleCreated, awstypes.VolumeLifecycleMisconfigured, awstypes.VolumeLifecycleAvailable), + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 150 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Volume); ok { + switch output.Lifecycle { + case awstypes.VolumeLifecycleFailed: + // Report any failed non-VOLUME_UPDATE administrative actions. + // See https://docs.aws.amazon.com/fsx/latest/APIReference/API_AdministrativeAction.html#FSx-Type-AdministrativeAction-AdministrativeActionType. 
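findVolumes above captures the pagination change in one place: v1's callback-based DescribeVolumesPagesWithContext becomes an explicit paginator loop in v2. A reduced, runnable version of that loop follows; findVolumesByType is an illustrative name, and the provider's NotFoundError translation and predicate plumbing are omitted.

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

// findVolumesByType drains the DescribeVolumes paginator, keeping volumes of
// one type, the same structure as the findVolumes helper in this diff.
func findVolumesByType(ctx context.Context, conn *fsx.Client, volType awstypes.VolumeType) ([]awstypes.Volume, error) {
	var volumes []awstypes.Volume

	pages := fsx.NewDescribeVolumesPaginator(conn, &fsx.DescribeVolumesInput{})
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, v := range page.Volumes {
			if v.VolumeType == volType {
				volumes = append(volumes, v)
			}
		}
	}

	return volumes, nil
}

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		panic(err)
	}
	vols, err := findVolumesByType(ctx, fsx.NewFromConfig(cfg), awstypes.VolumeTypeOntap)
	fmt.Println(len(vols), err)
}
```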
+ administrativeActions := tfslices.Filter(output.AdministrativeActions, func(v awstypes.AdministrativeAction) bool { + return v.Status == awstypes.StatusFailed && v.AdministrativeActionType != awstypes.AdministrativeActionTypeVolumeUpdate && v.FailureDetails != nil && startTime.Before(aws.ToTime(v.RequestTime)) + }) + administrativeActionsError := errors.Join(tfslices.ApplyToAll(administrativeActions, func(v awstypes.AdministrativeAction) error { + return fmt.Errorf("%s: %s", string(v.AdministrativeActionType), aws.ToString(v.FailureDetails.Message)) + })...) + + if reason := output.LifecycleTransitionReason; reason != nil { + if message := aws.ToString(reason.Message); administrativeActionsError != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %w", message, administrativeActionsError)) + } else { + tfresource.SetLastError(err, errors.New(message)) + } + } else { + tfresource.SetLastError(err, administrativeActionsError) + } + } + + return output, err + } + + return nil, err +} + +func waitVolumeDeleted(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.Volume, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.VolumeLifecycleCreated, awstypes.VolumeLifecycleMisconfigured, awstypes.VolumeLifecycleAvailable, awstypes.VolumeLifecycleDeleting), + Target: []string{}, + Refresh: statusVolume(ctx, conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Volume); ok { + if output.Lifecycle == awstypes.VolumeLifecycleFailed && output.LifecycleTransitionReason != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(output.LifecycleTransitionReason.Message))) + } + + return output, err + } + + return nil, err +} + +func findVolumeAdministrativeAction(ctx context.Context, conn *fsx.Client, volID string, actionType awstypes.AdministrativeActionType) (awstypes.AdministrativeAction, error) { + output, err := findVolumeByID(ctx, conn, volID) + + if err != nil { + return awstypes.AdministrativeAction{}, err + } + + for _, v := range output.AdministrativeActions { + if v.AdministrativeActionType == actionType { + return v, nil + } + } + + // If the administrative action isn't found, assume it's complete. 
+ return awstypes.AdministrativeAction{Status: awstypes.StatusCompleted}, nil +} + +func statusVolumeAdministrativeAction(ctx context.Context, conn *fsx.Client, volID string, actionType awstypes.AdministrativeActionType) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findVolumeAdministrativeAction(ctx, conn, volID, actionType) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitVolumeAdministrativeActionCompleted(ctx context.Context, conn *fsx.Client, volID string, actionType awstypes.AdministrativeActionType, timeout time.Duration) (*awstypes.AdministrativeAction, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.StatusInProgress, awstypes.StatusPending), + Target: enum.Slice(awstypes.StatusCompleted, awstypes.StatusUpdatedOptimizing), + Refresh: statusVolumeAdministrativeAction(ctx, conn, volID, actionType), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.AdministrativeAction); ok { + if output.Status == awstypes.StatusFailed && output.FailureDetails != nil { + tfresource.SetLastError(err, errors.New(aws.ToString(output.FailureDetails.Message))) + } + + return output, err + } + + return nil, err +} + +func expandAggregateConfiguration(tfMap map[string]interface{}) *awstypes.CreateAggregateConfiguration { if tfMap == nil { return nil } - apiObject := &fsx.CreateAggregateConfiguration{} + apiObject := &awstypes.CreateAggregateConfiguration{} if v, ok := tfMap["aggregates"].([]interface{}); ok && v != nil { - apiObject.Aggregates = flex.ExpandStringList(v) + apiObject.Aggregates = flex.ExpandStringValueList(v) } if v, ok := tfMap["constituents_per_aggregate"].(int); ok && v != 0 { - apiObject.ConstituentsPerAggregate = aws.Int64(int64(v)) + apiObject.ConstituentsPerAggregate = aws.Int32(int32(v)) } return apiObject } -func flattenAggregateConfiguration(apiObject *fsx.AggregateConfiguration) map[string]interface{} { +func flattenAggregateConfiguration(apiObject *awstypes.AggregateConfiguration) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - var aggregates int64 + var aggregates int32 if v := apiObject.Aggregates; v != nil { - if v := aws.StringValueSlice(v); v != nil { - tfMap["aggregates"] = v - //Need to get the count of aggregates for calculating constituents_per_aggregate - aggregates = int64(len(v)) - } + tfMap["aggregates"] = v + //Need to get the count of aggregates for calculating constituents_per_aggregate + aggregates = int32(len(v)) } if v := apiObject.TotalConstituents; v != nil { - tfMap["total_constituents"] = aws.Int64Value(v) + tfMap["total_constituents"] = aws.ToInt32(v) //Since the api only returns totalConstituents, need to calculate the value of ConstituentsPerAggregate so state will be consistent with config if aggregates != 0 { - tfMap["constituents_per_aggregate"] = aws.Int64Value(v) / aggregates + tfMap["constituents_per_aggregate"] = aws.ToInt32(v) / aggregates } else { - tfMap["constituents_per_aggregate"] = aws.Int64Value(v) + tfMap["constituents_per_aggregate"] = aws.ToInt32(v) } } @@ -646,29 +872,29 @@ func flattenAggregateConfiguration(apiObject *fsx.AggregateConfiguration) map[st const minTieringPolicyCoolingPeriod = 2 -func expandTieringPolicy(tfMap map[string]interface{}) *fsx.TieringPolicy { +func 
expandTieringPolicy(tfMap map[string]interface{}) *awstypes.TieringPolicy { if tfMap == nil { return nil } - apiObject := &fsx.TieringPolicy{} + apiObject := &awstypes.TieringPolicy{} // Cooling period only accepts a minimum of 2 but int will return 0 not nil if unset. // Therefore we only set it if it is 2 or more. - if tfMap[names.AttrName].(string) == fsx.TieringPolicyNameAuto || tfMap[names.AttrName].(string) == fsx.TieringPolicyNameSnapshotOnly { + if tfMap[names.AttrName].(string) == string(awstypes.TieringPolicyNameAuto) || tfMap[names.AttrName].(string) == string(awstypes.TieringPolicyNameSnapshotOnly) { if v, ok := tfMap["cooling_period"].(int); ok && v >= minTieringPolicyCoolingPeriod { - apiObject.CoolingPeriod = aws.Int64(int64(v)) + apiObject.CoolingPeriod = aws.Int32(int32(v)) } } if v, ok := tfMap[names.AttrName].(string); ok && v != "" { - apiObject.Name = aws.String(v) + apiObject.Name = awstypes.TieringPolicyName(v) } return apiObject } -func flattenTieringPolicy(apiObject *fsx.TieringPolicy) map[string]interface{} { +func flattenTieringPolicy(apiObject *awstypes.TieringPolicy) map[string]interface{} { if apiObject == nil { return nil } @@ -676,24 +902,22 @@ func flattenTieringPolicy(apiObject *fsx.TieringPolicy) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.CoolingPeriod; v != nil { - if v := aws.Int64Value(v); v >= minTieringPolicyCoolingPeriod { + if v := aws.ToInt32(v); v >= minTieringPolicyCoolingPeriod { tfMap["cooling_period"] = v } } - if v := apiObject.Name; v != nil { - tfMap[names.AttrName] = aws.StringValue(v) - } + tfMap[names.AttrName] = string(apiObject.Name) return tfMap } -func expandCreateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.CreateSnaplockConfiguration { +func expandCreateSnaplockConfiguration(tfMap map[string]interface{}) *awstypes.CreateSnaplockConfiguration { if tfMap == nil { return nil } - apiObject := &fsx.CreateSnaplockConfiguration{} + apiObject := &awstypes.CreateSnaplockConfiguration{} if v, ok := tfMap["audit_log_volume"].(bool); ok && v { apiObject.AuditLogVolume = aws.Bool(v) @@ -704,7 +928,7 @@ func expandCreateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.Create } if v, ok := tfMap["privileged_delete"].(string); ok && v != "" { - apiObject.PrivilegedDelete = aws.String(v) + apiObject.PrivilegedDelete = awstypes.PrivilegedDelete(v) } if v, ok := tfMap[names.AttrRetentionPeriod].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -712,7 +936,7 @@ func expandCreateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.Create } if v, ok := tfMap["snaplock_type"].(string); ok && v != "" { - apiObject.SnaplockType = aws.String(v) + apiObject.SnaplockType = awstypes.SnaplockType(v) } if v, ok := tfMap["volume_append_mode_enabled"].(bool); ok && v { @@ -722,12 +946,12 @@ func expandCreateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.Create return apiObject } -func expandUpdateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.UpdateSnaplockConfiguration { +func expandUpdateSnaplockConfiguration(tfMap map[string]interface{}) *awstypes.UpdateSnaplockConfiguration { if tfMap == nil { return nil } - apiObject := &fsx.UpdateSnaplockConfiguration{} + apiObject := &awstypes.UpdateSnaplockConfiguration{} if v, ok := tfMap["audit_log_volume"].(bool); ok && v { apiObject.AuditLogVolume = aws.Bool(v) @@ -738,7 +962,7 @@ func expandUpdateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.Update } if v, ok := tfMap["privileged_delete"].(string); ok && v != "" { - 
apiObject.PrivilegedDelete = aws.String(v) + apiObject.PrivilegedDelete = awstypes.PrivilegedDelete(v) } if v, ok := tfMap[names.AttrRetentionPeriod].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -752,30 +976,30 @@ func expandUpdateSnaplockConfiguration(tfMap map[string]interface{}) *fsx.Update return apiObject } -func expandAutocommitPeriod(tfMap map[string]interface{}) *fsx.AutocommitPeriod { +func expandAutocommitPeriod(tfMap map[string]interface{}) *awstypes.AutocommitPeriod { if tfMap == nil { return nil } - apiObject := &fsx.AutocommitPeriod{} + apiObject := &awstypes.AutocommitPeriod{} if v, ok := tfMap[names.AttrType].(string); ok && v != "" { - apiObject.Type = aws.String(v) + apiObject.Type = awstypes.AutocommitPeriodType(v) } if v, ok := tfMap[names.AttrValue].(int); ok && v != 0 { - apiObject.Value = aws.Int64(int64(v)) + apiObject.Value = aws.Int32(int32(v)) } return apiObject } -func expandSnaplockRetentionPeriod(tfMap map[string]interface{}) *fsx.SnaplockRetentionPeriod { +func expandSnaplockRetentionPeriod(tfMap map[string]interface{}) *awstypes.SnaplockRetentionPeriod { if tfMap == nil { return nil } - apiObject := &fsx.SnaplockRetentionPeriod{} + apiObject := &awstypes.SnaplockRetentionPeriod{} if v, ok := tfMap["default_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.DefaultRetention = expandRetentionPeriod(v[0].(map[string]interface{})) @@ -792,25 +1016,25 @@ func expandSnaplockRetentionPeriod(tfMap map[string]interface{}) *fsx.SnaplockRe return apiObject } -func expandRetentionPeriod(tfMap map[string]interface{}) *fsx.RetentionPeriod { +func expandRetentionPeriod(tfMap map[string]interface{}) *awstypes.RetentionPeriod { if tfMap == nil { return nil } - apiObject := &fsx.RetentionPeriod{} + apiObject := &awstypes.RetentionPeriod{} if v, ok := tfMap[names.AttrType].(string); ok && v != "" { - apiObject.Type = aws.String(v) + apiObject.Type = awstypes.RetentionPeriodType(v) } if v, ok := tfMap[names.AttrValue].(int); ok && v != 0 { - apiObject.Value = aws.Int64(int64(v)) + apiObject.Value = aws.Int32(int32(v)) } return apiObject } -func flattenSnaplockConfiguration(apiObject *fsx.SnaplockConfiguration) map[string]interface{} { +func flattenSnaplockConfiguration(apiObject *awstypes.SnaplockConfiguration) map[string]interface{} { if apiObject == nil { return nil } @@ -818,51 +1042,45 @@ func flattenSnaplockConfiguration(apiObject *fsx.SnaplockConfiguration) map[stri tfMap := map[string]interface{}{} if v := apiObject.AuditLogVolume; v != nil { - tfMap["audit_log_volume"] = aws.BoolValue(v) + tfMap["audit_log_volume"] = aws.ToBool(v) } if v := apiObject.AutocommitPeriod; v != nil { tfMap["autocommit_period"] = []interface{}{flattenAutocommitPeriod(v)} } - if v := apiObject.PrivilegedDelete; v != nil { - tfMap["privileged_delete"] = aws.StringValue(v) - } + tfMap["privileged_delete"] = string(apiObject.PrivilegedDelete) if v := apiObject.RetentionPeriod; v != nil { tfMap[names.AttrRetentionPeriod] = []interface{}{flattenSnaplockRetentionPeriod(v)} } - if v := apiObject.SnaplockType; v != nil { - tfMap["snaplock_type"] = aws.StringValue(v) - } + tfMap["snaplock_type"] = string(apiObject.SnaplockType) if v := apiObject.VolumeAppendModeEnabled; v != nil { - tfMap["volume_append_mode_enabled"] = aws.BoolValue(v) + tfMap["volume_append_mode_enabled"] = aws.ToBool(v) } return tfMap } -func flattenAutocommitPeriod(apiObject *fsx.AutocommitPeriod) map[string]interface{} { +func flattenAutocommitPeriod(apiObject *awstypes.AutocommitPeriod) 
map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.Type; v != nil { - tfMap[names.AttrType] = aws.StringValue(v) - } + tfMap[names.AttrType] = string(apiObject.Type) if v := apiObject.Value; v != nil { - tfMap[names.AttrValue] = aws.Int64Value(v) + tfMap[names.AttrValue] = aws.ToInt32(v) } return tfMap } -func flattenSnaplockRetentionPeriod(apiObject *fsx.SnaplockRetentionPeriod) map[string]interface{} { +func flattenSnaplockRetentionPeriod(apiObject *awstypes.SnaplockRetentionPeriod) map[string]interface{} { if apiObject == nil { return nil } @@ -884,253 +1102,18 @@ func flattenSnaplockRetentionPeriod(apiObject *fsx.SnaplockRetentionPeriod) map[ return tfMap } -func flattenRetentionPeriod(apiObject *fsx.RetentionPeriod) map[string]interface{} { +func flattenRetentionPeriod(apiObject *awstypes.RetentionPeriod) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.Type; v != nil { - tfMap[names.AttrType] = aws.StringValue(v) - } + tfMap[names.AttrType] = string(apiObject.Type) if v := apiObject.Value; v != nil { - tfMap[names.AttrValue] = aws.Int64Value(v) + tfMap[names.AttrValue] = aws.ToInt32(v) } return tfMap } - -func findONTAPVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { - output, err := findVolumeByIDAndType(ctx, conn, id, fsx.VolumeTypeOntap) - - if err != nil { - return nil, err - } - - if output.OntapConfiguration == nil { - return nil, tfresource.NewEmptyResultError(nil) - } - - return output, nil -} - -func findVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { - input := &fsx.DescribeVolumesInput{ - VolumeIds: aws.StringSlice([]string{id}), - } - - return findVolume(ctx, conn, input, tfslices.PredicateTrue[*fsx.Volume]()) -} - -func findVolumeByIDAndType(ctx context.Context, conn *fsx.FSx, volID, volType string) (*fsx.Volume, error) { - input := &fsx.DescribeVolumesInput{ - VolumeIds: aws.StringSlice([]string{volID}), - } - filter := func(fs *fsx.Volume) bool { - return aws.StringValue(fs.VolumeType) == volType - } - - return findVolume(ctx, conn, input, filter) -} - -func findVolume(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput, filter tfslices.Predicate[*fsx.Volume]) (*fsx.Volume, error) { - output, err := findVolumes(ctx, conn, input, filter) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - -func findVolumes(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeVolumesInput, filter tfslices.Predicate[*fsx.Volume]) ([]*fsx.Volume, error) { - var output []*fsx.Volume - - err := conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Volumes { - if v != nil && filter(v) { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - return output, nil -} - -func statusVolume(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := findVolumeByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Lifecycle), nil - } -} - 
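Both the removed v1 waiters below and their v2 replacements earlier in this file share one idiom: a status closure feeding retry.StateChangeConf, with the lifecycle enum flattened to strings. A stripped-down, compilable sketch of that pairing against the v2 client (the package name is illustrative; the diff's NotFound translation and failure-reason decoration are omitted):

```go
// Package fsxwait sketches the status/waiter pairing used in this diff.
package fsxwait

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/fsx"
	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
)

// statusVolume returns a refresh function reporting the volume's lifecycle
// state; a nil result with a nil error tells StateChangeConf the volume is
// gone. A real implementation would also translate VolumeNotFound errors
// into a nil result, as the provider's finders do.
func statusVolume(ctx context.Context, conn *fsx.Client, id string) retry.StateRefreshFunc {
	return func() (interface{}, string, error) {
		out, err := conn.DescribeVolumes(ctx, &fsx.DescribeVolumesInput{
			VolumeIds: []string{id},
		})
		if err != nil {
			return nil, "", err
		}
		if len(out.Volumes) == 0 {
			return nil, "", nil
		}
		v := out.Volumes[0]
		return &v, string(v.Lifecycle), nil
	}
}

// waitVolumeCreated blocks until the volume leaves its creating states,
// mirroring the Pending/Target sets used by the waiter in this diff.
func waitVolumeCreated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.Volume, error) {
	stateConf := &retry.StateChangeConf{
		Pending: []string{string(awstypes.VolumeLifecycleCreating), string(awstypes.VolumeLifecyclePending)},
		Target:  []string{string(awstypes.VolumeLifecycleCreated), string(awstypes.VolumeLifecycleAvailable)},
		Refresh: statusVolume(ctx, conn, id),
		Timeout: timeout,
		Delay:   30 * time.Second,
	}

	raw, err := stateConf.WaitForStateContext(ctx)
	if v, ok := raw.(*awstypes.Volume); ok {
		return v, err
	}
	return nil, err
}
```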
-func waitVolumeCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.VolumeLifecycleCreating, fsx.VolumeLifecyclePending}, - Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - if status, reason := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && reason != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) - } - - return output, err - } - - return nil, err -} - -func waitVolumeUpdated(ctx context.Context, conn *fsx.FSx, id string, startTime time.Time, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.VolumeLifecyclePending}, - Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 150 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - switch status := aws.StringValue(output.Lifecycle); status { - case fsx.VolumeLifecycleFailed: - // Report any failed non-VOLUME_UPDATE administrative actions. - // See https://docs.aws.amazon.com/fsx/latest/APIReference/API_AdministrativeAction.html#FSx-Type-AdministrativeAction-AdministrativeActionType. - administrativeActions := tfslices.Filter(output.AdministrativeActions, func(v *fsx.AdministrativeAction) bool { - return v != nil && aws.StringValue(v.Status) == fsx.StatusFailed && aws.StringValue(v.AdministrativeActionType) != fsx.AdministrativeActionTypeVolumeUpdate && v.FailureDetails != nil && startTime.Before(aws.TimeValue(v.RequestTime)) - }) - administrativeActionsError := errors.Join(tfslices.ApplyToAll(administrativeActions, func(v *fsx.AdministrativeAction) error { - return fmt.Errorf("%s: %s", aws.StringValue(v.AdministrativeActionType), aws.StringValue(v.FailureDetails.Message)) - })...) 
- - if reason := output.LifecycleTransitionReason; reason != nil { - if message := aws.StringValue(reason.Message); administrativeActionsError != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %w", message, administrativeActionsError)) - } else { - tfresource.SetLastError(err, errors.New(message)) - } - } else { - tfresource.SetLastError(err, administrativeActionsError) - } - } - - return output, err - } - - return nil, err -} - -func waitVolumeDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable, fsx.VolumeLifecycleDeleting}, - Target: []string{}, - Refresh: statusVolume(ctx, conn, id), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.Volume); ok { - if status, reason := aws.StringValue(output.Lifecycle), output.LifecycleTransitionReason; status == fsx.VolumeLifecycleFailed && reason != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(reason.Message))) - } - - return output, err - } - - return nil, err -} - -func findVolumeAdministrativeAction(ctx context.Context, conn *fsx.FSx, volID, actionType string) (*fsx.AdministrativeAction, error) { - output, err := findVolumeByID(ctx, conn, volID) - - if err != nil { - return nil, err - } - - for _, v := range output.AdministrativeActions { - if v == nil { - continue - } - - if aws.StringValue(v.AdministrativeActionType) == actionType { - return v, nil - } - } - - // If the administrative action isn't found, assume it's complete. - return &fsx.AdministrativeAction{Status: aws.String(fsx.StatusCompleted)}, nil -} - -func statusVolumeAdministrativeAction(ctx context.Context, conn *fsx.FSx, volID, actionType string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := findVolumeAdministrativeAction(ctx, conn, volID, actionType) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - -func waitVolumeAdministrativeActionCompleted(ctx context.Context, conn *fsx.FSx, volID, actionType string, timeout time.Duration) (*fsx.AdministrativeAction, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.StatusInProgress, fsx.StatusPending}, - Target: []string{fsx.StatusCompleted, fsx.StatusUpdatedOptimizing}, - Refresh: statusVolumeAdministrativeAction(ctx, conn, volID, actionType), - Timeout: timeout, - Delay: 30 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*fsx.AdministrativeAction); ok { - if status, details := aws.StringValue(output.Status), output.FailureDetails; status == fsx.StatusFailed && details != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.FailureDetails.Message))) - } - - return output, err - } - - return nil, err -} diff --git a/internal/service/fsx/ontap_volume_test.go b/internal/service/fsx/ontap_volume_test.go index e434a09d0403..5a79f6315623 100644 --- a/internal/service/fsx/ontap_volume_test.go +++ b/internal/service/fsx/ontap_volume_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/fsx/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,12 +23,12 @@ import ( func TestAccFSxONTAPVolume_basic(t *testing.T) { ctx := acctest.Context(t) - var volume fsx.Volume + var volume awstypes.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), @@ -72,12 +72,12 @@ func TestAccFSxONTAPVolume_basic(t *testing.T) { func TestAccFSxONTAPVolume_disappears(t *testing.T) { ctx := acctest.Context(t) - var volume fsx.Volume + var volume awstypes.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), @@ -96,13 +96,13 @@ func TestAccFSxONTAPVolume_disappears(t *testing.T) { func TestAccFSxONTAPVolume_aggregateConfiguration(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume ConstituentsPerAggregate := 10 resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), @@ -146,12 +146,12 @@ func TestAccFSxONTAPVolume_aggregateConfiguration(t *testing.T) { func TestAccFSxONTAPVolume_copyTagsToBackups(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), @@ -187,14 +187,14 @@ func TestAccFSxONTAPVolume_copyTagsToBackups(t *testing.T) { func TestAccFSxONTAPVolume_junctionPath(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_ontap_volume.test" rName := 
fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) jPath1 := "/path1" jPath2 := "/path2" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), @@ -232,13 +232,13 @@ func TestAccFSxONTAPVolume_junctionPath(t *testing.T) { func TestAccFSxONTAPVolume_name(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) rName2 := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), @@ -274,12 +274,12 @@ func TestAccFSxONTAPVolume_name(t *testing.T) { func TestAccFSxONTAPVolume_ontapVolumeType(t *testing.T) { ctx := acctest.Context(t) - var volume fsx.Volume + var volume awstypes.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), @@ -307,12 +307,12 @@ func TestAccFSxONTAPVolume_ontapVolumeType(t *testing.T) { func TestAccFSxONTAPVolume_securityStyle(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2, volume3 fsx.Volume + var volume1, volume2, volume3 awstypes.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckONTAPVolumeDestroy(ctx), @@ -359,7 +359,7 @@ func TestAccFSxONTAPVolume_securityStyle(t *testing.T) { func TestAccFSxONTAPVolume_size(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2, volume3, volume4 fsx.Volume + var volume1, volume2, volume3, volume4 awstypes.Volume resourceName := "aws_fsx_ontap_volume.test" rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt()) size1 := 1024 @@ -368,7 +368,7 @@ func TestAccFSxONTAPVolume_size(t *testing.T) { size4 := int64(1125899906842623) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); 
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckONTAPVolumeDestroy(ctx),
@@ -424,12 +424,12 @@ func TestAccFSxONTAPVolume_size(t *testing.T) {
 
 func TestAccFSxONTAPVolume_snaplock(t *testing.T) {
 	ctx := acctest.Context(t)
-	var volume1 /*, volume2*/ fsx.Volume
+	var volume1 /*, volume2*/ awstypes.Volume
 	resourceName := "aws_fsx_ontap_volume.test"
 	rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt())
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckONTAPVolumeDestroy(ctx),
@@ -506,14 +506,14 @@ func TestAccFSxONTAPVolume_snaplock(t *testing.T) {
 
 func TestAccFSxONTAPVolume_snapshotPolicy(t *testing.T) {
 	ctx := acctest.Context(t)
-	var volume1, volume2 fsx.Volume
+	var volume1, volume2 awstypes.Volume
 	resourceName := "aws_fsx_ontap_volume.test"
 	rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt())
 	policy1 := "default"
 	policy2 := "none"
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckONTAPVolumeDestroy(ctx),
@@ -551,12 +551,12 @@ func TestAccFSxONTAPVolume_snapshotPolicy(t *testing.T) {
 
 func TestAccFSxONTAPVolume_storageEfficiency(t *testing.T) {
 	ctx := acctest.Context(t)
-	var volume1, volume2 fsx.Volume
+	var volume1, volume2 awstypes.Volume
 	resourceName := "aws_fsx_ontap_volume.test"
 	rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt())
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckONTAPVolumeDestroy(ctx),
@@ -594,12 +594,12 @@ func TestAccFSxONTAPVolume_storageEfficiency(t *testing.T) {
 
 func TestAccFSxONTAPVolume_tags(t *testing.T) {
 	ctx := acctest.Context(t)
-	var volume1, volume2, volume3 fsx.Volume
+	var volume1, volume2, volume3 awstypes.Volume
 	resourceName := "aws_fsx_ontap_volume.test"
 	rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt())
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckONTAPVolumeDestroy(ctx),
@@ -647,12 +647,12 @@ func TestAccFSxONTAPVolume_tags(t *testing.T) {
 
 func TestAccFSxONTAPVolume_tieringPolicy(t *testing.T) {
 	ctx := acctest.Context(t)
-	var volume1, volume2, volume3, volume4 fsx.Volume
+	var volume1, volume2, volume3, volume4 awstypes.Volume
 	resourceName := "aws_fsx_ontap_volume.test"
 	rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt())
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckONTAPVolumeDestroy(ctx),
@@ -710,14 +710,14 @@ func TestAccFSxONTAPVolume_tieringPolicy(t *testing.T) {
 
 func TestAccFSxONTAPVolume_volumeStyle(t *testing.T) {
 	ctx := acctest.Context(t)
-	var volume fsx.Volume
+	var volume awstypes.Volume
 	resourceName := "aws_fsx_ontap_volume.test"
 	style1 := "FLEXVOL"
 	style2 := "FLEXGROUP"
 	rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt())
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckONTAPVolumeDestroy(ctx),
@@ -752,14 +752,14 @@ func TestAccFSxONTAPVolume_volumeStyle(t *testing.T) {
 
 func TestAccFSxONTAPVolume_deleteConfig(t *testing.T) {
 	ctx := acctest.Context(t)
-	var volume fsx.Volume
+	var volume awstypes.Volume
 	resourceName := "aws_fsx_ontap_volume.test"
 	rName := fmt.Sprintf("tf_acc_test_%d", sdkacctest.RandInt())
 
 	acctest.SkipIfEnvVarNotSet(t, "AWS_FSX_CREATE_FINAL_BACKUP")
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckONTAPVolumeDestroy(ctx),
@@ -788,14 +788,14 @@ func TestAccFSxONTAPVolume_deleteConfig(t *testing.T) {
 	})
 }
 
-func testAccCheckONTAPVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc {
+func testAccCheckONTAPVolumeExists(ctx context.Context, n string, v *awstypes.Volume) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[n]
 		if !ok {
 			return fmt.Errorf("Not found: %s", n)
 		}
 
-		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)
 
 		output, err := tffsx.FindONTAPVolumeByID(ctx, conn, rs.Primary.ID)
 
@@ -811,7 +811,7 @@ func testAccCheckONTAPVolumeExists(ctx context.Context, n string, v *fsx.Volume)
 
 func testAccCheckONTAPVolumeDestroy(ctx context.Context) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)
 
 		for _, rs := range s.RootModule().Resources {
 			if rs.Type != "aws_fsx_ontap_volume" {
@@ -834,20 +834,20 @@ func testAccCheckONTAPVolumeDestroy(ctx context.Context) resource.TestCheckFunc
 	}
 }
 
-func testAccCheckONTAPVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc {
+func testAccCheckONTAPVolumeNotRecreated(i, j *awstypes.Volume) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		if aws.StringValue(i.VolumeId) != aws.StringValue(j.VolumeId) {
-			return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) recreated", aws.StringValue(i.VolumeId))
+		if aws.ToString(i.VolumeId) != aws.ToString(j.VolumeId) {
+			return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) recreated", aws.ToString(i.VolumeId))
 		}
 
 		return nil
 	}
 }
 
-func testAccCheckONTAPVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc {
+func testAccCheckONTAPVolumeRecreated(i, j *awstypes.Volume) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		if aws.StringValue(i.VolumeId) == aws.StringValue(j.VolumeId) {
-			return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) not recreated", aws.StringValue(i.VolumeId))
+		if aws.ToString(i.VolumeId) == aws.ToString(j.VolumeId) {
+			return fmt.Errorf("FSx for NetApp ONTAP Volume (%s) not recreated", aws.ToString(i.VolumeId))
 		}
 
 		return nil
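The test-helper hunks above summarize the most mechanical part of the migration: v1's pointer-dereference helpers (`aws.StringValue`, `aws.Int64Value`, `aws.BoolValue`, ...) become v2's `aws.ToString`, `aws.ToInt64`, `aws.ToBool`, and friends. An illustrative side-by-side (the volume ID is a made-up placeholder):

```go
package main

import (
	awsv1 "github.com/aws/aws-sdk-go/aws"    // SDK for Go v1
	awsv2 "github.com/aws/aws-sdk-go-v2/aws" // SDK for Go v2
)

func main() {
	id := awsv2.String("fsvol-0123456789abcdef0") // hypothetical volume ID

	_ = awsv1.StringValue(id) // v1 naming: StringValue / Int64Value / BoolValue / TimeValue
	_ = awsv2.ToString(id)    // v2 naming: ToString / ToInt32 / ToInt64 / ToBool / ToTime
}
```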
diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go
index cd83f1fdf3b1..b5a89c3eb4f9 100644
--- a/internal/service/fsx/openzfs_file_system.go
+++ b/internal/service/fsx/openzfs_file_system.go
@@ -11,15 +11,17 @@ import (
 	"time"
 
 	"github.com/YakDriver/regexache"
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/fsx"
-	"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/fsx"
+	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/id"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	"github.com/hashicorp/terraform-provider-aws/internal/enum"
+	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
 	"github.com/hashicorp/terraform-provider-aws/internal/flex"
 	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
@@ -90,15 +92,15 @@ func resourceOpenZFSFileSystem() *schema.Resource {
 				Type:     schema.TypeSet,
 				Optional: true,
 				Elem: &schema.Schema{
-					Type:         schema.TypeString,
-					ValidateFunc: validation.StringInSlice(fsx.DeleteFileSystemOpenZFSOption_Values(), false),
+					Type:             schema.TypeString,
+					ValidateDiagFunc: enum.Validate[awstypes.DeleteFileSystemOpenZFSOption](),
 				},
 			},
 			"deployment_type": {
-				Type:         schema.TypeString,
-				Required:     true,
-				ForceNew:     true,
-				ValidateFunc: validation.StringInSlice(fsx.OpenZFSDeploymentType_Values(), false),
+				Type:             schema.TypeString,
+				Required:         true,
+				ForceNew:         true,
+				ValidateDiagFunc: enum.Validate[awstypes.OpenZFSDeploymentType](),
 			},
 			"disk_iops_configuration": {
 				Type: schema.TypeList,
@@ -113,10 +115,10 @@ func resourceOpenZFSFileSystem() *schema.Resource {
 							Computed: true,
 						},
 						names.AttrMode: {
-							Type:         schema.TypeString,
-							Optional:     true,
-							Default:      fsx.DiskIopsConfigurationModeAutomatic,
-							ValidateFunc: validation.StringInSlice(fsx.DiskIopsConfigurationMode_Values(), false),
+							Type:             schema.TypeString,
+							Optional:         true,
+							Default:          awstypes.DiskIopsConfigurationModeAutomatic,
+							ValidateDiagFunc: enum.Validate[awstypes.DiskIopsConfigurationMode](),
 						},
 					},
 				},
@@ -170,9 +172,9 @@ func resourceOpenZFSFileSystem() *schema.Resource {
 							ForceNew: true,
 						},
 						"data_compression_type": {
-							Type:         schema.TypeString,
-							Optional:     true,
-							ValidateFunc: validation.StringInSlice(fsx.OpenZFSDataCompressionType_Values(), false),
+							Type:             schema.TypeString,
+							Optional:         true,
+							ValidateDiagFunc: enum.Validate[awstypes.OpenZFSDataCompressionType](),
 						},
 						"nfs_exports": {
 							Type: schema.TypeList,
@@ -239,9 +241,9 @@ func resourceOpenZFSFileSystem() *schema.Resource {
 										ValidateFunc: validation.IntBetween(0, 2147483647),
 									},
 									names.AttrType: {
-										Type:         schema.TypeString,
-										Required:     true,
-										ValidateFunc: validation.StringInSlice(fsx.OpenZFSQuotaType_Values(), false),
+										Type:             schema.TypeString,
+										Required:         true,
+										ValidateDiagFunc: enum.Validate[awstypes.OpenZFSQuotaType](),
 									},
 								},
 							},
@@ -278,11 +280,11 @@ func resourceOpenZFSFileSystem() *schema.Resource {
 				ValidateFunc: validation.IntBetween(64, 512*1024),
 			},
 			names.AttrStorageType: {
-				Type:         schema.TypeString,
-				Optional:     true,
-				ForceNew:     true,
-				Default:      fsx.StorageTypeSsd,
-				ValidateFunc: validation.StringInSlice(fsx.StorageType_Values(), false),
+				Type:             schema.TypeString,
+				Optional:         true,
+				ForceNew:         true,
+				Default:          awstypes.StorageTypeSsd,
+				ValidateDiagFunc: enum.Validate[awstypes.StorageType](),
 			},
 			names.AttrSubnetIDs: {
 				Type: schema.TypeList,
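Schema-level enum validation changes shape here: v1 exposed `Xxx_Values()` functions fed into `validation.StringInSlice`, while v2 enums are string-backed types with a `Values()` method, which the provider's `enum.Validate[T]()` turns into a `ValidateDiagFunc`. A hedged sketch of what such a generic validator can look like (the provider's actual `internal/enum` implementation may differ):

```go
package enum

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// valueser matches AWS SDK v2 enum types: string-backed, with a Values()
// method that lists every known value of the enum.
type valueser[T ~string] interface {
	~string
	Values() []T
}

// Validate builds a ValidateDiagFunc from the enum type's known values.
func Validate[T valueser[T]]() schema.SchemaValidateDiagFunc {
	var zero T
	var valid []string
	for _, v := range zero.Values() {
		valid = append(valid, string(v))
	}
	return validation.ToDiagFunc(validation.StringInSlice(valid, false))
}
```

The payoff is that new enum values picked up by an SDK upgrade are accepted automatically, with no hand-maintained value lists in the schema.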
Valid values: %v", throughputCapacity, deploymentType, singleAZ2AndMultiAZ1ThroughputCapacityValues) } @@ -348,13 +350,13 @@ func validateDiskConfigurationIOPS(_ context.Context, d *schema.ResourceDiff, me m := diskConfiguration.([]interface{})[0].(map[string]interface{}) if v, ok := m[names.AttrIOPS].(int); ok { - if deploymentType == fsx.OpenZFSDeploymentTypeSingleAz1 { + if deploymentType == string(awstypes.OpenZFSDeploymentTypeSingleAz1) { if v < 0 || v > 160000 { - return fmt.Errorf("expected disk_iops_configuration.0.iops to be in the range (0 - 160000) when deployment_type (%s), got %d", fsx.OpenZFSDeploymentTypeSingleAz1, v) + return fmt.Errorf("expected disk_iops_configuration.0.iops to be in the range (0 - 160000) when deployment_type (%s), got %d", awstypes.OpenZFSDeploymentTypeSingleAz1, v) } - } else if deploymentType == fsx.OpenZFSDeploymentTypeSingleAz2 { + } else if deploymentType == string(awstypes.OpenZFSDeploymentTypeSingleAz2) { if v < 0 || v > 350000 { - return fmt.Errorf("expected disk_iops_configuration.0.iops to be in the range (0 - 350000) when deployment_type (%s), got %d", fsx.OpenZFSDeploymentTypeSingleAz2, v) + return fmt.Errorf("expected disk_iops_configuration.0.iops to be in the range (0 - 350000) when deployment_type (%s), got %d", awstypes.OpenZFSDeploymentTypeSingleAz2, v) } } } @@ -366,28 +368,28 @@ func validateDiskConfigurationIOPS(_ context.Context, d *schema.ResourceDiff, me func resourceOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) inputC := &fsx.CreateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), - FileSystemType: aws.String(fsx.FileSystemTypeOpenzfs), - OpenZFSConfiguration: &fsx.CreateFileSystemOpenZFSConfiguration{ - DeploymentType: aws.String(d.Get("deployment_type").(string)), - AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), + FileSystemType: awstypes.FileSystemTypeOpenzfs, + OpenZFSConfiguration: &awstypes.CreateFileSystemOpenZFSConfiguration{ + DeploymentType: awstypes.OpenZFSDeploymentType(d.Get("deployment_type").(string)), + AutomaticBackupRetentionDays: aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))), }, - StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), - StorageType: aws.String(d.Get(names.AttrStorageType).(string)), - SubnetIds: flex.ExpandStringList(d.Get(names.AttrSubnetIDs).([]interface{})), + StorageCapacity: aws.Int32(int32(d.Get("storage_capacity").(int))), + StorageType: awstypes.StorageType(d.Get(names.AttrStorageType).(string)), + SubnetIds: flex.ExpandStringValueList(d.Get(names.AttrSubnetIDs).([]interface{})), Tags: getTagsIn(ctx), } inputB := &fsx.CreateFileSystemFromBackupInput{ ClientRequestToken: aws.String(id.UniqueId()), - OpenZFSConfiguration: &fsx.CreateFileSystemOpenZFSConfiguration{ - DeploymentType: aws.String(d.Get("deployment_type").(string)), - AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), + OpenZFSConfiguration: &awstypes.CreateFileSystemOpenZFSConfiguration{ + DeploymentType: awstypes.OpenZFSDeploymentType(d.Get("deployment_type").(string)), + AutomaticBackupRetentionDays: aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))), }, - StorageType: aws.String(d.Get(names.AttrStorageType).(string)), - SubnetIds: 
-		SubnetIds:   flex.ExpandStringList(d.Get(names.AttrSubnetIDs).([]interface{})),
+		StorageType: awstypes.StorageType(d.Get(names.AttrStorageType).(string)),
+		SubnetIds:   flex.ExpandStringValueList(d.Get(names.AttrSubnetIDs).([]interface{})),
 		Tags:        getTagsIn(ctx),
 	}
 
@@ -432,18 +434,18 @@ func resourceOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData
 	}
 
 	if v, ok := d.GetOk("route_table_ids"); ok {
-		inputC.OpenZFSConfiguration.RouteTableIds = flex.ExpandStringSet(v.(*schema.Set))
-		inputB.OpenZFSConfiguration.RouteTableIds = flex.ExpandStringSet(v.(*schema.Set))
+		inputC.OpenZFSConfiguration.RouteTableIds = flex.ExpandStringValueSet(v.(*schema.Set))
+		inputB.OpenZFSConfiguration.RouteTableIds = flex.ExpandStringValueSet(v.(*schema.Set))
 	}
 
 	if v, ok := d.GetOk(names.AttrSecurityGroupIDs); ok {
-		inputC.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set))
-		inputB.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set))
+		inputC.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set))
+		inputB.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set))
 	}
 
 	if v, ok := d.GetOk("throughput_capacity"); ok {
-		inputC.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int)))
-		inputB.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int)))
+		inputC.OpenZFSConfiguration.ThroughputCapacity = aws.Int32(int32(v.(int)))
+		inputB.OpenZFSConfiguration.ThroughputCapacity = aws.Int32(int32(v.(int)))
 	}
 
 	if v, ok := d.GetOk("weekly_maintenance_start_time"); ok {
@@ -455,21 +457,21 @@ func resourceOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData
 		backupID := v.(string)
 		inputB.BackupId = aws.String(backupID)
 
-		output, err := conn.CreateFileSystemFromBackupWithContext(ctx, inputB)
+		output, err := conn.CreateFileSystemFromBackup(ctx, inputB)
 
 		if err != nil {
 			return sdkdiag.AppendErrorf(diags, "creating FSx for OpenZFS File System from backup (%s): %s", backupID, err)
 		}
 
-		d.SetId(aws.StringValue(output.FileSystem.FileSystemId))
+		d.SetId(aws.ToString(output.FileSystem.FileSystemId))
 	} else {
-		output, err := conn.CreateFileSystemWithContext(ctx, inputC)
+		output, err := conn.CreateFileSystem(ctx, inputC)
 
 		if err != nil {
 			return sdkdiag.AppendErrorf(diags, "creating FSx for OpenZFS File System: %s", err)
 		}
 
-		d.SetId(aws.StringValue(output.FileSystem.FileSystemId))
+		d.SetId(aws.ToString(output.FileSystem.FileSystemId))
 	}
 
 	if _, err := waitFileSystemCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil {
@@ -481,7 +483,7 @@ func resourceOpenZFSFileSystemCreate(ctx context.Context, d *schema.ResourceData
 
 func resourceOpenZFSFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	filesystem, err := findOpenZFSFileSystemByID(ctx, conn, d.Id())
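The route-table, security-group, and subnet handling above swaps `flex.ExpandStringList`/`flex.ExpandStringSet` (which produce the `[]*string` that v1 inputs required) for value-returning variants, since v2 inputs take plain `[]string` or typed enum slices. A hedged sketch of those helpers' shape (the provider's real `internal/flex` package may differ in details):

```go
package flex

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// ExpandStringValueList converts a []interface{} of strings into []string,
// the form v2 API inputs expect.
func ExpandStringValueList(configured []interface{}) []string {
	vs := make([]string, 0, len(configured))
	for _, v := range configured {
		if s, ok := v.(string); ok {
			vs = append(vs, s)
		}
	}
	return vs
}

// ExpandStringyValueSet does the same for a schema.Set, converting into a
// typed string-like slice such as []awstypes.DeleteFileSystemOpenZFSOption,
// as used for delete_options in the Delete function below.
func ExpandStringyValueSet[T ~string](configured *schema.Set) []T {
	vs := make([]T, 0, configured.Len())
	for _, v := range configured.List() {
		vs = append(vs, T(v.(string)))
	}
	return vs
}
```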
d.Set("root_volume_id", rootVolumeID) - d.Set("route_table_ids", aws.StringValueSlice(openZFSConfig.RouteTableIds)) + d.Set("route_table_ids", openZFSConfig.RouteTableIds) d.Set("storage_capacity", filesystem.StorageCapacity) d.Set(names.AttrStorageType, filesystem.StorageType) - d.Set(names.AttrSubnetIDs, aws.StringValueSlice(filesystem.SubnetIds)) + d.Set(names.AttrSubnetIDs, filesystem.SubnetIds) d.Set("throughput_capacity", openZFSConfig.ThroughputCapacity) d.Set(names.AttrVPCID, filesystem.VpcId) d.Set("weekly_maintenance_start_time", openZFSConfig.WeeklyMaintenanceStartTime) @@ -541,7 +543,7 @@ func resourceOpenZFSFileSystemRead(ctx context.Context, d *schema.ResourceData, func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChangesExcept( "delete_options", @@ -553,11 +555,11 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), - OpenZFSConfiguration: &fsx.UpdateFileSystemOpenZFSConfiguration{}, + OpenZFSConfiguration: &awstypes.UpdateFileSystemOpenZFSConfiguration{}, } if d.HasChange("automatic_backup_retention_days") { - input.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))) + input.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))) } if d.HasChange("copy_tags_to_backups") { @@ -582,19 +584,19 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData add, del := flex.ExpandStringValueSet(ns.Difference(os)), flex.ExpandStringValueSet(os.Difference(ns)) if len(add) > 0 { - input.OpenZFSConfiguration.AddRouteTableIds = aws.StringSlice(add) + input.OpenZFSConfiguration.AddRouteTableIds = add } if len(del) > 0 { - input.OpenZFSConfiguration.RemoveRouteTableIds = aws.StringSlice(del) + input.OpenZFSConfiguration.RemoveRouteTableIds = del } } if d.HasChange("storage_capacity") { - input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int))) + input.StorageCapacity = aws.Int32(int32(d.Get("storage_capacity").(int))) } if d.HasChange("throughput_capacity") { - input.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(d.Get("throughput_capacity").(int))) + input.OpenZFSConfiguration.ThroughputCapacity = aws.Int32(int32(d.Get("throughput_capacity").(int))) } if d.HasChange("weekly_maintenance_start_time") { @@ -602,7 +604,7 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData } startTime := time.Now() - _, err := conn.UpdateFileSystemWithContext(ctx, input) + _, err := conn.UpdateFileSystem(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS File System (%s): %s", d.Id(), err) @@ -612,8 +614,8 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) update: %s", d.Id(), err) } - if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action (%s) complete: %s", d.Id(), 
+		if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil {
+			return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS File System (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, err)
 		}
 
 		if d.HasChange("root_volume_configuration") {
@@ -625,7 +627,7 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData
 			}
 
 			startTime := time.Now()
-			_, err := conn.UpdateVolumeWithContext(ctx, input)
+			_, err := conn.UpdateVolume(ctx, input)
 
 			if err != nil {
 				return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS Root Volume (%s): %s", rootVolumeID, err)
@@ -635,8 +637,8 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData
 				return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Root Volume (%s) update: %s", rootVolumeID, err)
 			}
 
-			if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, rootVolumeID, fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil {
-				return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) administrative action (%s) complete: %s", rootVolumeID, fsx.AdministrativeActionTypeVolumeUpdate, err)
+			if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, rootVolumeID, awstypes.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil {
+				return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) administrative action (%s) complete: %s", rootVolumeID, awstypes.AdministrativeActionTypeVolumeUpdate, err)
 			}
 		}
 	}
@@ -646,17 +648,17 @@ func resourceOpenZFSFileSystemUpdate(ctx context.Context, d *schema.ResourceData
 
 func resourceOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
-	conn := meta.(*conns.AWSClient).FSxConn(ctx)
+	conn := meta.(*conns.AWSClient).FSxClient(ctx)
 
 	input := &fsx.DeleteFileSystemInput{
 		FileSystemId: aws.String(d.Id()),
-		OpenZFSConfiguration: &fsx.DeleteFileSystemOpenZFSConfiguration{
+		OpenZFSConfiguration: &awstypes.DeleteFileSystemOpenZFSConfiguration{
 			SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)),
 		},
 	}
 
 	if v, ok := d.GetOk("delete_options"); ok {
-		input.OpenZFSConfiguration.Options = flex.ExpandStringSet(v.(*schema.Set))
+		input.OpenZFSConfiguration.Options = flex.ExpandStringyValueSet[awstypes.DeleteFileSystemOpenZFSOption](v.(*schema.Set))
 	}
 
 	if v, ok := d.GetOk("final_backup_tags"); ok && len(v.(map[string]interface{})) > 0 {
@@ -664,9 +666,9 @@ func resourceOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData
 	}
 
 	log.Printf("[DEBUG] Deleting FSx for OpenZFS File System: %s", d.Id())
-	_, err := conn.DeleteFileSystemWithContext(ctx, input)
+	_, err := conn.DeleteFileSystem(ctx, input)
 
-	if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) {
+	if errs.IsA[*awstypes.FileSystemNotFound](err) {
 		return diags
 	}
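The not-found handling above captures another recurring migration pattern: SDK v1 surfaced service errors as string codes (checked with `tfawserr.ErrCodeEquals`), while v2 generates a concrete Go type per API error, so the check becomes a type match. The provider's `errs.IsA` is, in essence, a generic wrapper around the standard library's `errors.As`; a minimal sketch (the real `internal/errs` package may carry extra helpers):

```go
package errs

import "errors"

// IsA reports whether any error in err's chain is of type T.
func IsA[T error](err error) bool {
	var target T
	return errors.As(err, &target)
}
```

Usage mirrors the diff: `errs.IsA[*awstypes.FileSystemNotFound](err)` matches the typed not-found error anywhere in the wrapped chain, which is more robust than comparing error-code strings.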
@@ -681,17 +683,17 @@ func resourceOpenZFSFileSystemDelete(ctx context.Context, d *schema.ResourceData
 	return diags
 }
 
-func expandDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration {
+func expandDiskIopsConfiguration(cfg []interface{}) *awstypes.DiskIopsConfiguration {
 	if len(cfg) < 1 {
 		return nil
 	}
 
 	conf := cfg[0].(map[string]interface{})
 
-	out := fsx.DiskIopsConfiguration{}
+	out := awstypes.DiskIopsConfiguration{}
 
 	if v, ok := conf[names.AttrMode].(string); ok && len(v) > 0 {
-		out.Mode = aws.String(v)
+		out.Mode = awstypes.DiskIopsConfigurationMode(v)
 	}
 
 	if v, ok := conf[names.AttrIOPS].(int); ok {
@@ -701,21 +703,21 @@ func expandDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration {
 	return &out
 }
 
-func expandOpenZFSCreateRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateRootVolumeConfiguration {
+func expandOpenZFSCreateRootVolumeConfiguration(cfg []interface{}) *awstypes.OpenZFSCreateRootVolumeConfiguration {
 	if len(cfg) < 1 {
 		return nil
 	}
 
 	conf := cfg[0].(map[string]interface{})
 
-	out := fsx.OpenZFSCreateRootVolumeConfiguration{}
+	out := awstypes.OpenZFSCreateRootVolumeConfiguration{}
 
 	if v, ok := conf["copy_tags_to_snapshots"].(bool); ok {
 		out.CopyTagsToSnapshots = aws.Bool(v)
 	}
 
 	if v, ok := conf["data_compression_type"].(string); ok {
-		out.DataCompressionType = aws.String(v)
+		out.DataCompressionType = awstypes.OpenZFSDataCompressionType(v)
 	}
 
 	if v, ok := conf["read_only"].(bool); ok {
@@ -723,7 +725,7 @@ func expandOpenZFSCreateRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSC
 	}
 
 	if v, ok := conf["record_size_kib"].(int); ok {
-		out.RecordSizeKiB = aws.Int64(int64(v))
+		out.RecordSizeKiB = aws.Int32(int32(v))
 	}
 
 	if v, ok := conf["user_and_group_quotas"]; ok {
@@ -737,17 +739,17 @@ func expandOpenZFSCreateRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSC
 	return &out
 }
 
-func expandUpdateOpenZFSVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZFSVolumeConfiguration {
+func expandUpdateOpenZFSVolumeConfiguration(cfg []interface{}) *awstypes.UpdateOpenZFSVolumeConfiguration {
 	if len(cfg) < 1 {
 		return nil
 	}
 
 	conf := cfg[0].(map[string]interface{})
 
-	out := fsx.UpdateOpenZFSVolumeConfiguration{}
+	out := awstypes.UpdateOpenZFSVolumeConfiguration{}
 
 	if v, ok := conf["data_compression_type"].(string); ok {
-		out.DataCompressionType = aws.String(v)
+		out.DataCompressionType = awstypes.OpenZFSDataCompressionType(v)
 	}
 
 	if v, ok := conf["read_only"].(bool); ok {
@@ -755,7 +757,7 @@ func expandUpdateOpenZFSVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZF
 	}
 
 	if v, ok := conf["record_size_kib"].(int); ok {
-		out.RecordSizeKiB = aws.Int64(int64(v))
+		out.RecordSizeKiB = aws.Int32(int32(v))
 	}
 
 	if v, ok := conf["user_and_group_quotas"]; ok {
@@ -769,42 +771,38 @@ func expandUpdateOpenZFSVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZF
 	return &out
 }
 
-func flattenDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} {
+func flattenDiskIopsConfiguration(rs *awstypes.DiskIopsConfiguration) []interface{} {
 	if rs == nil {
 		return []interface{}{}
 	}
 
 	m := make(map[string]interface{})
-	if rs.Mode != nil {
-		m[names.AttrMode] = aws.StringValue(rs.Mode)
-	}
+	m[names.AttrMode] = string(rs.Mode)
 	if rs.Iops != nil {
-		m[names.AttrIOPS] = aws.Int64Value(rs.Iops)
+		m[names.AttrIOPS] = aws.ToInt64(rs.Iops)
 	}
 
 	return []interface{}{m}
 }
 
-func flattenOpenZFSFileSystemRootVolume(rs *fsx.Volume) []interface{} {
+func flattenOpenZFSFileSystemRootVolume(rs *awstypes.Volume) []interface{} {
 	if rs == nil {
 		return []interface{}{}
 	}
 
 	m := make(map[string]interface{})
 	if rs.OpenZFSConfiguration.CopyTagsToSnapshots != nil {
-		m["copy_tags_to_snapshots"] = aws.BoolValue(rs.OpenZFSConfiguration.CopyTagsToSnapshots)
-	}
-	if rs.OpenZFSConfiguration.DataCompressionType != nil {
-		m["data_compression_type"] = aws.StringValue(rs.OpenZFSConfiguration.DataCompressionType)
+		m["copy_tags_to_snapshots"] = aws.ToBool(rs.OpenZFSConfiguration.CopyTagsToSnapshots)
 	}
+	m["data_compression_type"] = string(rs.OpenZFSConfiguration.DataCompressionType)
 	if rs.OpenZFSConfiguration.NfsExports != nil {
 		m["nfs_exports"] = flattenOpenZFSNfsExports(rs.OpenZFSConfiguration.NfsExports)
 	}
 	if rs.OpenZFSConfiguration.ReadOnly != nil {
-		m["read_only"] = aws.BoolValue(rs.OpenZFSConfiguration.ReadOnly)
+		m["read_only"] = aws.ToBool(rs.OpenZFSConfiguration.ReadOnly)
 	}
 	if rs.OpenZFSConfiguration.RecordSizeKiB != nil {
-		m["record_size_kib"] = aws.Int64Value(rs.OpenZFSConfiguration.RecordSizeKiB)
+		m["record_size_kib"] = aws.ToInt32(rs.OpenZFSConfiguration.RecordSizeKiB)
 	}
 	if rs.OpenZFSConfiguration.UserAndGroupQuotas != nil {
 		m["user_and_group_quotas"] = flattenOpenZFSUserOrGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas)
@@ -813,8 +811,8 @@ func flattenOpenZFSFileSystemRootVolume(rs *fsx.Volume) []interface{} {
 	return []interface{}{m}
 }
 
-func findOpenZFSFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) {
-	output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeOpenzfs)
+func findOpenZFSFileSystemByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.FileSystem, error) {
+	output, err := findFileSystemByIDAndType(ctx, conn, id, awstypes.FileSystemTypeOpenzfs)
 
 	if err != nil {
 		return nil, err
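`findFileSystemByIDAndType` itself is not part of this excerpt; the hunk above only shows its call site gaining v2 types. A hedged sketch of the shape such a finder takes against the v2 client, assuming it pages through DescribeFileSystems and filters by type (the package's real helper may be factored differently):

```go
package fsx

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/service/fsx"
	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
)

func findFileSystemByIDAndType(ctx context.Context, conn *fsx.Client, fsID string, fsType awstypes.FileSystemType) (*awstypes.FileSystem, error) {
	// v2 inputs take []string directly; no aws.StringSlice needed.
	input := &fsx.DescribeFileSystemsInput{
		FileSystemIds: []string{fsID},
	}

	// v2 replaces v1's DescribeFileSystemsPagesWithContext callback style
	// with an explicit paginator.
	pages := fsx.NewDescribeFileSystemsPaginator(conn, input)
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			return nil, err
		}

		for i := range page.FileSystems {
			if page.FileSystems[i].FileSystemType == fsType {
				return &page.FileSystems[i], nil
			}
		}
	}

	return nil, tfresource.NewEmptyResultError(input)
}
```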
"disk_iops_configuration.0.mode", "AUTOMATIC"), @@ -80,7 +80,7 @@ func TestAccFSxOpenZFSFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "64"), - resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, fsx.StorageTypeSsd), + resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, string(awstypes.StorageTypeSsd)), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", acctest.Ct1), resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.0", names.AttrID), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), @@ -105,12 +105,12 @@ func TestAccFSxOpenZFSFileSystem_basic(t *testing.T) { func TestAccFSxOpenZFSFileSystem_diskIops(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), @@ -150,12 +150,12 @@ func TestAccFSxOpenZFSFileSystem_diskIops(t *testing.T) { func TestAccFSxOpenZFSFileSystem_disappears(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), @@ -174,12 +174,12 @@ func TestAccFSxOpenZFSFileSystem_disappears(t *testing.T) { func TestAccFSxOpenZFSFileSystem_rootVolume(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2, filesystem3 fsx.FileSystem + var filesystem1, filesystem2, filesystem3 awstypes.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), @@ -362,12 +362,12 @@ func TestAccFSxOpenZFSFileSystem_rootVolume(t *testing.T) { func TestAccFSxOpenZFSFileSystem_securityGroupIDs(t *testing.T) { ctx := acctest.Context(t) - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1, filesystem2 awstypes.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -404,12 +404,12 @@ func TestAccFSxOpenZFSFileSystem_securityGroupIDs(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_tags(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2, filesystem3 fsx.FileSystem
+	var filesystem1, filesystem2, filesystem3 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -458,12 +458,12 @@ func TestAccFSxOpenZFSFileSystem_tags(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_copyTags(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -505,12 +505,12 @@ func TestAccFSxOpenZFSFileSystem_copyTags(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_throughput(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -547,12 +547,12 @@ func TestAccFSxOpenZFSFileSystem_throughput(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_storageType(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1 fsx.FileSystem
+	var filesystem1 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -581,12 +581,12 @@ func TestAccFSxOpenZFSFileSystem_storageType(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_weeklyMaintenanceStartTime(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -623,12 +623,12 @@ func TestAccFSxOpenZFSFileSystem_weeklyMaintenanceStartTime(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_automaticBackupRetentionDays(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -672,12 +672,12 @@ func TestAccFSxOpenZFSFileSystem_automaticBackupRetentionDays(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_kmsKeyID(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -706,12 +706,12 @@ func TestAccFSxOpenZFSFileSystem_kmsKeyID(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_dailyAutomaticBackupStartTime(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -748,12 +748,12 @@ func TestAccFSxOpenZFSFileSystem_dailyAutomaticBackupStartTime(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_throughputCapacity(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -790,12 +790,12 @@ func TestAccFSxOpenZFSFileSystem_throughputCapacity(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_storageCapacity(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -832,12 +832,12 @@ func TestAccFSxOpenZFSFileSystem_storageCapacity(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_deploymentType(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -876,12 +876,12 @@ func TestAccFSxOpenZFSFileSystem_deploymentType(t *testing.T) {
 
 func TestAccFSxOpenZFSFileSystem_multiAZ(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_openzfs_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckOpenZFSFileSystemDestroy(ctx),
@@ -896,7 +896,7 @@ func TestAccFSxOpenZFSFileSystem_multiAZ(t *testing.T) {
 					resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", acctest.CtFalse),
 					resource.TestCheckResourceAttr(resourceName, "copy_tags_to_volumes", acctest.CtFalse),
 					resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", ""),
"deployment_type", fsx.OpenZFSDeploymentTypeMultiAz1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", string(awstypes.OpenZFSDeploymentTypeMultiAz1)), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "192"), resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "AUTOMATIC"), @@ -923,7 +923,7 @@ func TestAccFSxOpenZFSFileSystem_multiAZ(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "skip_final_backup", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "storage_capacity", "64"), - resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, fsx.StorageTypeSsd), + resource.TestCheckResourceAttr(resourceName, names.AttrStorageType, string(awstypes.StorageTypeSsd)), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", acctest.Ct2), resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.0", names.AttrID), resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test.1", names.AttrID), @@ -950,12 +950,12 @@ func TestAccFSxOpenZFSFileSystem_multiAZ(t *testing.T) { func TestAccFSxOpenZFSFileSystem_routeTableIDs(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), @@ -1002,14 +1002,14 @@ func TestAccFSxOpenZFSFileSystem_routeTableIDs(t *testing.T) { func TestAccFSxOpenZFSFileSystem_deleteConfig(t *testing.T) { ctx := acctest.Context(t) - var filesystem fsx.FileSystem + var filesystem awstypes.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) acctest.SkipIfEnvVarNotSet(t, "AWS_FSX_CREATE_FINAL_BACKUP") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSFileSystemDestroy(ctx), @@ -1041,14 +1041,14 @@ func TestAccFSxOpenZFSFileSystem_deleteConfig(t *testing.T) { }) } -func testAccCheckOpenZFSFileSystemExists(ctx context.Context, n string, v *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckOpenZFSFileSystemExists(ctx context.Context, n string, v *awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) output, err := tffsx.FindOpenZFSFileSystemByID(ctx, conn, rs.Primary.ID) @@ -1064,7 +1064,7 @@ func 
testAccCheckOpenZFSFileSystemExists(ctx context.Context, n string, v *fsx.F func testAccCheckOpenZFSFileSystemDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_fsx_openzfs_file_system" { @@ -1088,20 +1088,20 @@ func testAccCheckOpenZFSFileSystemDestroy(ctx context.Context) resource.TestChec } } -func testAccCheckOpenZFSFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckOpenZFSFileSystemNotRecreated(i, j *awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx for OpenZFS File System (%s) recreated", aws.StringValue(i.FileSystemId)) + if aws.ToString(i.FileSystemId) != aws.ToString(j.FileSystemId) { + return fmt.Errorf("FSx for OpenZFS File System (%s) recreated", aws.ToString(i.FileSystemId)) } return nil } } -func testAccCheckOpenZFSFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { +func testAccCheckOpenZFSFileSystemRecreated(i, j *awstypes.FileSystem) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { - return fmt.Errorf("FSx for OpenZFS File System (%s) not recreated", aws.StringValue(i.FileSystemId)) + if aws.ToString(i.FileSystemId) == aws.ToString(j.FileSystemId) { + return fmt.Errorf("FSx for OpenZFS File System (%s) not recreated", aws.ToString(i.FileSystemId)) } return nil diff --git a/internal/service/fsx/openzfs_snapshot.go b/internal/service/fsx/openzfs_snapshot.go index 9c3d641adcca..68f9047eeb4e 100644 --- a/internal/service/fsx/openzfs_snapshot.go +++ b/internal/service/fsx/openzfs_snapshot.go @@ -9,9 +9,9 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -19,6 +19,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -79,7 +81,7 @@ func resourceOpenZFSSnapshot() *schema.Resource { func resourceOpenZFSSnapshotCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) input := &fsx.CreateSnapshotInput{ ClientRequestToken: aws.String(id.UniqueId()), @@ -88,13 +90,13 @@ func resourceOpenZFSSnapshotCreate(ctx context.Context, d *schema.ResourceData, VolumeId: aws.String(d.Get("volume_id").(string)), } - output, err := 
conn.CreateSnapshotWithContext(ctx, input) + output, err := conn.CreateSnapshot(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx OpenZFS Snapshot: %s", err) } - d.SetId(aws.StringValue(output.Snapshot.SnapshotId)) + d.SetId(aws.ToString(output.Snapshot.SnapshotId)) if _, err := waitSnapshotCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx OpenZFS Snapshot (%s) create: %s", d.Id(), err) @@ -105,7 +107,7 @@ func resourceOpenZFSSnapshotCreate(ctx context.Context, d *schema.ResourceData, func resourceOpenZFSSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) snapshot, err := findSnapshotByID(ctx, conn, d.Id()) @@ -132,7 +134,7 @@ func resourceOpenZFSSnapshotRead(ctx context.Context, d *schema.ResourceData, me func resourceOpenZFSSnapshotUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &fsx.UpdateSnapshotInput{ @@ -144,7 +146,7 @@ func resourceOpenZFSSnapshotUpdate(ctx context.Context, d *schema.ResourceData, input.Name = aws.String(d.Get(names.AttrName).(string)) } - _, err := conn.UpdateSnapshotWithContext(ctx, input) + _, err := conn.UpdateSnapshot(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx OpenZFS Snapshot (%s): %s", d.Id(), err) @@ -160,14 +162,14 @@ func resourceOpenZFSSnapshotUpdate(ctx context.Context, d *schema.ResourceData, func resourceOpenZFSSnapshotDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) log.Printf("[INFO] Deleting FSx Snapshot: %s", d.Id()) - _, err := conn.DeleteSnapshotWithContext(ctx, &fsx.DeleteSnapshotInput{ + _, err := conn.DeleteSnapshot(ctx, &fsx.DeleteSnapshotInput{ SnapshotId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeSnapshotNotFound) { + if errs.IsA[*awstypes.SnapshotNotFound](err) { return diags } @@ -182,56 +184,53 @@ func resourceOpenZFSSnapshotDelete(ctx context.Context, d *schema.ResourceData, return diags } -func findSnapshotByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Snapshot, error) { +func findSnapshotByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.Snapshot, error) { input := &fsx.DescribeSnapshotsInput{ - SnapshotIds: aws.StringSlice([]string{id}), + SnapshotIds: []string{id}, } - return findSnapshot(ctx, conn, input, tfslices.PredicateTrue[*fsx.Snapshot]()) + return findSnapshot(ctx, conn, input, tfslices.PredicateTrue[*awstypes.Snapshot]()) } -func findSnapshot(ctx context.Context, conn *fsx.FSx, input *fsx.DescribeSnapshotsInput, filter tfslices.Predicate[*fsx.Snapshot]) (*fsx.Snapshot, error) { +func findSnapshot(ctx context.Context, conn *fsx.Client, input *fsx.DescribeSnapshotsInput, filter tfslices.Predicate[*awstypes.Snapshot]) (*awstypes.Snapshot, error) { output, err := findSnapshots(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findSnapshots(ctx context.Context, conn *fsx.FSx, 
input *fsx.DescribeSnapshotsInput, filter tfslices.Predicate[*fsx.Snapshot]) ([]*fsx.Snapshot, error) { - var output []*fsx.Snapshot +func findSnapshots(ctx context.Context, conn *fsx.Client, input *fsx.DescribeSnapshotsInput, filter tfslices.Predicate[*awstypes.Snapshot]) ([]awstypes.Snapshot, error) { + var output []awstypes.Snapshot - err := conn.DescribeSnapshotsPagesWithContext(ctx, input, func(page *fsx.DescribeSnapshotsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := fsx.NewDescribeSnapshotsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.Snapshots { - if v != nil && filter(v) { - output = append(output, v) + if errs.IsA[*awstypes.SnapshotNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeSnapshotNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.Snapshots { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func statusSnapshot(ctx context.Context, conn *fsx.FSx, id string) retry.StateRefreshFunc { +func statusSnapshot(ctx context.Context, conn *fsx.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findSnapshotByID(ctx, conn, id) @@ -243,14 +242,14 @@ func statusSnapshot(ctx context.Context, conn *fsx.FSx, id string) retry.StateRe return nil, "", err } - return output, aws.StringValue(output.Lifecycle), nil + return output, string(output.Lifecycle), nil } } -func waitSnapshotCreated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { +func waitSnapshotCreated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.Snapshot, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.SnapshotLifecycleCreating, fsx.SnapshotLifecyclePending}, - Target: []string{fsx.SnapshotLifecycleAvailable}, + Pending: enum.Slice(awstypes.SnapshotLifecycleCreating, awstypes.SnapshotLifecyclePending), + Target: enum.Slice(awstypes.SnapshotLifecycleAvailable), Refresh: statusSnapshot(ctx, conn, id), Timeout: timeout, Delay: 30 * time.Second, @@ -258,9 +257,9 @@ func waitSnapshotCreated(ctx context.Context, conn *fsx.FSx, id string, timeout outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.Snapshot); ok { + if output, ok := outputRaw.(*awstypes.Snapshot); ok { if output.LifecycleTransitionReason != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + tfresource.SetLastError(err, errors.New(aws.ToString(output.LifecycleTransitionReason.Message))) } return output, err @@ -269,10 +268,10 @@ func waitSnapshotCreated(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func waitSnapshotUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { +func waitSnapshotUpdated(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.Snapshot, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.SnapshotLifecyclePending}, - Target: []string{fsx.SnapshotLifecycleAvailable}, + Pending: enum.Slice(awstypes.SnapshotLifecyclePending), + Target: enum.Slice(awstypes.SnapshotLifecycleAvailable), Refresh: 
statusSnapshot(ctx, conn, id), Timeout: timeout, Delay: 150 * time.Second, @@ -280,9 +279,9 @@ func waitSnapshotUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.Snapshot); ok { + if output, ok := outputRaw.(*awstypes.Snapshot); ok { if output.LifecycleTransitionReason != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + tfresource.SetLastError(err, errors.New(aws.ToString(output.LifecycleTransitionReason.Message))) } return output, err @@ -291,9 +290,9 @@ func waitSnapshotUpdated(ctx context.Context, conn *fsx.FSx, id string, timeout return nil, err } -func waitSnapshotDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { +func waitSnapshotDeleted(ctx context.Context, conn *fsx.Client, id string, timeout time.Duration) (*awstypes.Snapshot, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{fsx.SnapshotLifecyclePending, fsx.SnapshotLifecycleDeleting}, + Pending: enum.Slice(awstypes.SnapshotLifecyclePending, awstypes.SnapshotLifecycleDeleting), Target: []string{}, Refresh: statusSnapshot(ctx, conn, id), Timeout: timeout, @@ -302,9 +301,9 @@ func waitSnapshotDeleted(ctx context.Context, conn *fsx.FSx, id string, timeout outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*fsx.Snapshot); ok { + if output, ok := outputRaw.(*awstypes.Snapshot); ok { if output.LifecycleTransitionReason != nil { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.LifecycleTransitionReason.Message))) + tfresource.SetLastError(err, errors.New(aws.ToString(output.LifecycleTransitionReason.Message))) } return output, err diff --git a/internal/service/fsx/openzfs_snapshot_data_source.go b/internal/service/fsx/openzfs_snapshot_data_source.go index 84dc0da1f7ba..42d83fdbc9ee 100644 --- a/internal/service/fsx/openzfs_snapshot_data_source.go +++ b/internal/service/fsx/openzfs_snapshot_data_source.go @@ -8,8 +8,9 @@ import ( "sort" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -21,7 +22,6 @@ import ( ) // @SDKDataSource("aws_fsx_openzfs_snapshot", name="OpenZFS Snapshot") -// @Tags func dataSourceOpenzfsSnapshot() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceOpenZFSSnapshotRead, @@ -65,12 +65,13 @@ func dataSourceOpenzfsSnapshot() *schema.Resource { func dataSourceOpenZFSSnapshotRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig input := &fsx.DescribeSnapshotsInput{} if v, ok := d.GetOk("snapshot_ids"); ok && len(v.([]interface{})) > 0 { - input.SnapshotIds = flex.ExpandStringList(v.([]interface{})) + input.SnapshotIds = flex.ExpandStringValueList(v.([]interface{})) } input.Filters = append(input.Filters, newSnapshotFilterList( @@ -81,7 +82,7 @@ func dataSourceOpenZFSSnapshotRead(ctx context.Context, d *schema.ResourceData, input.Filters = nil } - snapshots, err 
:= findSnapshots(ctx, conn, input, tfslices.PredicateTrue[*fsx.Snapshot]()) + snapshots, err := findSnapshots(ctx, conn, input, tfslices.PredicateTrue[*awstypes.Snapshot]()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading FSx Snapshots: %s", err) @@ -98,19 +99,31 @@ func dataSourceOpenZFSSnapshotRead(ctx context.Context, d *schema.ResourceData, } sort.Slice(snapshots, func(i, j int) bool { - return aws.TimeValue(snapshots[i].CreationTime).Unix() > aws.TimeValue(snapshots[j].CreationTime).Unix() + return aws.ToTime(snapshots[i].CreationTime).Unix() > aws.ToTime(snapshots[j].CreationTime).Unix() }) } snapshot := snapshots[0] - d.SetId(aws.StringValue(snapshot.SnapshotId)) - d.Set(names.AttrARN, snapshot.ResourceARN) + d.SetId(aws.ToString(snapshot.SnapshotId)) + arn := aws.ToString(snapshot.ResourceARN) + d.Set(names.AttrARN, arn) d.Set(names.AttrCreationTime, snapshot.CreationTime.Format(time.RFC3339)) d.Set(names.AttrName, snapshot.Name) d.Set(names.AttrSnapshotID, snapshot.SnapshotId) d.Set("volume_id", snapshot.VolumeId) - setTagsOut(ctx, snapshot.Tags) + // Snapshot tags aren't set in the Describe response. + // setTagsOut(ctx, snapshot.Tags) + + tags, err := listTags(ctx, conn, arn) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing tags for FSx OpenZFS Snapshot (%s): %s", arn, err) + } + + if err := d.Set(names.AttrTags, tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) + } return diags } diff --git a/internal/service/fsx/openzfs_snapshot_data_source_test.go b/internal/service/fsx/openzfs_snapshot_data_source_test.go index f7b33ed197c2..b123bad2eb78 100644 --- a/internal/service/fsx/openzfs_snapshot_data_source_test.go +++ b/internal/service/fsx/openzfs_snapshot_data_source_test.go @@ -7,7 +7,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/fsx" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -23,7 +22,7 @@ func TestAccFSxOpenZFSSnapshotDataSource_basic(t *testing.T) { rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSSnapshotDestroy(ctx), @@ -81,6 +80,10 @@ func testAccOpenZFSSnapshotDataSourceConfig_basic(rName string) string { resource "aws_fsx_openzfs_snapshot" "test" { name = %[1]q volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + + tags = { + Name = %[1]q + } } data "aws_fsx_openzfs_snapshot" "test" { @@ -94,6 +97,10 @@ func testAccOpenZFSSnapshotDataSourceConfig_filterFileSystemId(rName string) str resource "aws_fsx_openzfs_snapshot" "test" { name = %[1]q volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + + tags = { + Name = %[1]q + } } data "aws_fsx_openzfs_snapshot" "test" { @@ -110,6 +117,10 @@ func testAccOpenZFSSnapshotDataSourceConfig_filterVolumeId(rName string) string resource "aws_fsx_openzfs_snapshot" "test" { name = %[1]q volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + + tags = { + Name = %[1]q + } } data "aws_fsx_openzfs_snapshot" "test" { 
@@ -126,12 +137,20 @@ func testAccOpenZFSSnapshotDataSourceConfig_mostRecent(rName, rName2 string) str resource "aws_fsx_openzfs_snapshot" "test" { name = %[1]q volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + + tags = { + Name = %[1]q + } } resource "aws_fsx_openzfs_snapshot" "latest" { # Ensure that this snapshot is created after the other. name = %[2]q volume_id = aws_fsx_openzfs_snapshot.test.volume_id + + tags = { + Name = %[2]q + } } data "aws_fsx_openzfs_snapshot" "test" { diff --git a/internal/service/fsx/openzfs_snapshot_test.go b/internal/service/fsx/openzfs_snapshot_test.go index d78c37e06de9..438d52b97bf8 100644 --- a/internal/service/fsx/openzfs_snapshot_test.go +++ b/internal/service/fsx/openzfs_snapshot_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,12 +23,12 @@ import ( func TestAccFSxOpenZFSSnapshot_basic(t *testing.T) { ctx := acctest.Context(t) - var snapshot fsx.Snapshot + var snapshot awstypes.Snapshot resourceName := "aws_fsx_openzfs_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSSnapshotDestroy(ctx), @@ -55,12 +55,12 @@ func TestAccFSxOpenZFSSnapshot_basic(t *testing.T) { func TestAccFSxOpenZFSSnapshot_disappears(t *testing.T) { ctx := acctest.Context(t) - var snapshot fsx.Snapshot + var snapshot awstypes.Snapshot resourceName := "aws_fsx_openzfs_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSSnapshotDestroy(ctx), @@ -79,12 +79,12 @@ func TestAccFSxOpenZFSSnapshot_disappears(t *testing.T) { func TestAccFSxOpenZFSSnapshot_tags(t *testing.T) { ctx := acctest.Context(t) - var snapshot fsx.Snapshot + var snapshot awstypes.Snapshot resourceName := "aws_fsx_openzfs_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSSnapshotDestroy(ctx), @@ -125,13 +125,13 @@ func TestAccFSxOpenZFSSnapshot_tags(t *testing.T) { func TestAccFSxOpenZFSSnapshot_name(t *testing.T) { ctx := 
acctest.Context(t) - var snapshot1, snapshot2 fsx.Snapshot + var snapshot1, snapshot2 awstypes.Snapshot resourceName := "aws_fsx_openzfs_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSSnapshotDestroy(ctx), @@ -162,12 +162,12 @@ func TestAccFSxOpenZFSSnapshot_name(t *testing.T) { func TestAccFSxOpenZFSSnapshot_childVolume(t *testing.T) { ctx := acctest.Context(t) - var snapshot fsx.Snapshot + var snapshot awstypes.Snapshot resourceName := "aws_fsx_openzfs_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSSnapshotDestroy(ctx), @@ -192,13 +192,13 @@ func TestAccFSxOpenZFSSnapshot_childVolume(t *testing.T) { func TestAccFSxOpenZFSSnapshot_volumeID(t *testing.T) { ctx := acctest.Context(t) - var snapshot1, snapshot2 fsx.Snapshot + var snapshot1, snapshot2 awstypes.Snapshot resourceName := "aws_fsx_openzfs_snapshot.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSSnapshotDestroy(ctx), @@ -227,14 +227,14 @@ func TestAccFSxOpenZFSSnapshot_volumeID(t *testing.T) { }) } -func testAccCheckOpenZFSSnapshotExists(ctx context.Context, n string, v *fsx.Snapshot) resource.TestCheckFunc { +func testAccCheckOpenZFSSnapshotExists(ctx context.Context, n string, v *awstypes.Snapshot) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) output, err := tffsx.FindSnapshotByID(ctx, conn, rs.Primary.ID) @@ -250,7 +250,7 @@ func testAccCheckOpenZFSSnapshotExists(ctx context.Context, n string, v *fsx.Sna func testAccCheckOpenZFSSnapshotDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_fsx_openzfs_snapshot" { @@ -274,20 +274,20 @@ func testAccCheckOpenZFSSnapshotDestroy(ctx context.Context) resource.TestCheckF } } -func 
testAccCheckOpenZFSSnapshotNotRecreated(i, j *fsx.Snapshot) resource.TestCheckFunc { +func testAccCheckOpenZFSSnapshotNotRecreated(i, j *awstypes.Snapshot) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.SnapshotId) != aws.StringValue(j.SnapshotId) { - return fmt.Errorf("FSx OpenZFS Snapshot (%s) recreated", aws.StringValue(i.SnapshotId)) + if aws.ToString(i.SnapshotId) != aws.ToString(j.SnapshotId) { + return fmt.Errorf("FSx OpenZFS Snapshot (%s) recreated", aws.ToString(i.SnapshotId)) } return nil } } -func testAccCheckOpenZFSSnapshotRecreated(i, j *fsx.Snapshot) resource.TestCheckFunc { +func testAccCheckOpenZFSSnapshotRecreated(i, j *awstypes.Snapshot) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.SnapshotId) == aws.StringValue(j.SnapshotId) { - return fmt.Errorf("FSx OpenZFS Snapshot (%s) not recreated", aws.StringValue(i.SnapshotId)) + if aws.ToString(i.SnapshotId) == aws.ToString(j.SnapshotId) { + return fmt.Errorf("FSx OpenZFS Snapshot (%s) not recreated", aws.ToString(i.SnapshotId)) } return nil diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index bd71e1476608..c5aebb1bbf08 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -9,14 +9,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -59,18 +61,18 @@ func resourceOpenZFSVolume() *schema.Resource { ForceNew: true, }, "data_compression_type": { - Type: schema.TypeString, - Optional: true, - Default: fsx.OpenZFSDataCompressionTypeNone, - ValidateFunc: validation.StringInSlice(fsx.OpenZFSDataCompressionType_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.OpenZFSDataCompressionTypeNone, + ValidateDiagFunc: enum.Validate[awstypes.OpenZFSDataCompressionType](), }, "delete_volume_options": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(fsx.DeleteFileSystemOpenZFSOption_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.DeleteFileSystemOpenZFSOption](), }, }, names.AttrName: { @@ -122,9 +124,9 @@ func resourceOpenZFSVolume() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "copy_strategy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(fsx.OpenZFSCopyStrategy_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.OpenZFSCopyStrategy](), }, "snapshot_arn": { Type: schema.TypeString, @@ -186,9 
+188,9 @@ func resourceOpenZFSVolume() *schema.Resource { ValidateFunc: validation.IntBetween(0, 2147483647), }, names.AttrType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(fsx.OpenZFSQuotaType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.OpenZFSQuotaType](), }, }, }, @@ -196,11 +198,11 @@ func resourceOpenZFSVolume() *schema.Resource { names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrVolumeType: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: fsx.VolumeTypeOpenzfs, - ValidateFunc: validation.StringInSlice(fsx.VolumeType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.VolumeTypeOpenzfs, + ValidateDiagFunc: enum.Validate[awstypes.VolumeType](), }, }, @@ -210,9 +212,9 @@ func resourceOpenZFSVolume() *schema.Resource { func resourceOpenZFSVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) - openzfsConfig := &fsx.CreateOpenZFSVolumeConfiguration{ + openzfsConfig := &awstypes.CreateOpenZFSVolumeConfiguration{ ParentVolumeId: aws.String(d.Get("parent_volume_id").(string)), } @@ -221,7 +223,7 @@ func resourceOpenZFSVolumeCreate(ctx context.Context, d *schema.ResourceData, me } if v, ok := d.GetOk("data_compression_type"); ok { - openzfsConfig.DataCompressionType = aws.String(v.(string)) + openzfsConfig.DataCompressionType = awstypes.OpenZFSDataCompressionType(v.(string)) } if v, ok := d.GetOk("nfs_exports"); ok { @@ -237,15 +239,15 @@ func resourceOpenZFSVolumeCreate(ctx context.Context, d *schema.ResourceData, me } if v, ok := d.GetOk("record_size_kib"); ok { - openzfsConfig.RecordSizeKiB = aws.Int64(int64(v.(int))) + openzfsConfig.RecordSizeKiB = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("storage_capacity_quota_gib"); ok { - openzfsConfig.StorageCapacityQuotaGiB = aws.Int64(int64(v.(int))) + openzfsConfig.StorageCapacityQuotaGiB = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("storage_capacity_reservation_gib"); ok { - openzfsConfig.StorageCapacityReservationGiB = aws.Int64(int64(v.(int))) + openzfsConfig.StorageCapacityReservationGiB = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("user_and_group_quotas"); ok { @@ -258,16 +260,16 @@ func resourceOpenZFSVolumeCreate(ctx context.Context, d *schema.ResourceData, me Name: aws.String(name), OpenZFSConfiguration: openzfsConfig, Tags: getTagsIn(ctx), - VolumeType: aws.String(d.Get(names.AttrVolumeType).(string)), + VolumeType: awstypes.VolumeType(d.Get(names.AttrVolumeType).(string)), } - output, err := conn.CreateVolumeWithContext(ctx, input) + output, err := conn.CreateVolume(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx for OpenZFS Volume (%s): %s", name, err) } - d.SetId(aws.StringValue(output.Volume.VolumeId)) + d.SetId(aws.ToString(output.Volume.VolumeId)) if _, err := waitVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) create: %s", d.Id(), err) @@ -278,7 +280,7 @@ func resourceOpenZFSVolumeCreate(ctx context.Context, d *schema.ResourceData, me func resourceOpenZFSVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) volume, err := findOpenZFSVolumeByID(ctx, conn, d.Id()) @@ -314,20 +316,21 @@ func resourceOpenZFSVolumeRead(ctx context.Context, d *schema.ResourceData, meta } d.Set(names.AttrVolumeType, volume.VolumeType) - setTagsOut(ctx, volume.Tags) + // Volume tags aren't set in the Describe response. + // setTagsOut(ctx, volume.Tags) return diags } func resourceOpenZFSVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { - openzfsConfig := &fsx.UpdateOpenZFSVolumeConfiguration{} + openzfsConfig := &awstypes.UpdateOpenZFSVolumeConfiguration{} if d.HasChange("data_compression_type") { - openzfsConfig.DataCompressionType = aws.String(d.Get("data_compression_type").(string)) + openzfsConfig.DataCompressionType = awstypes.OpenZFSDataCompressionType(d.Get("data_compression_type").(string)) } if d.HasChange("nfs_exports") { @@ -339,15 +342,15 @@ func resourceOpenZFSVolumeUpdate(ctx context.Context, d *schema.ResourceData, me } if d.HasChange("record_size_kib") { - openzfsConfig.RecordSizeKiB = aws.Int64(int64(d.Get("record_size_kib").(int))) + openzfsConfig.RecordSizeKiB = aws.Int32(int32(d.Get("record_size_kib").(int))) } if d.HasChange("storage_capacity_quota_gib") { - openzfsConfig.StorageCapacityQuotaGiB = aws.Int64(int64(d.Get("storage_capacity_quota_gib").(int))) + openzfsConfig.StorageCapacityQuotaGiB = aws.Int32(int32(d.Get("storage_capacity_quota_gib").(int))) } if d.HasChange("storage_capacity_reservation_gib") { - openzfsConfig.StorageCapacityReservationGiB = aws.Int64(int64(d.Get("storage_capacity_reservation_gib").(int))) + openzfsConfig.StorageCapacityReservationGiB = aws.Int32(int32(d.Get("storage_capacity_reservation_gib").(int))) } if d.HasChange("user_and_group_quotas") { @@ -365,7 +368,7 @@ func resourceOpenZFSVolumeUpdate(ctx context.Context, d *schema.ResourceData, me } startTime := time.Now() - _, err := conn.UpdateVolumeWithContext(ctx, input) + _, err := conn.UpdateVolume(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for OpenZFS Volume (%s): %s", d.Id(), err) @@ -375,8 +378,8 @@ func resourceOpenZFSVolumeUpdate(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) update: %s", d.Id(), err) } - if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeVolumeUpdate, err) + if _, err := waitVolumeAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeVolumeUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for OpenZFS Volume (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeVolumeUpdate, err) } } @@ -385,22 +388,22 @@ func resourceOpenZFSVolumeUpdate(ctx context.Context, d *schema.ResourceData, me func resourceOpenZFSVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := 
meta.(*conns.AWSClient).FSxClient(ctx) input := &fsx.DeleteVolumeInput{ VolumeId: aws.String(d.Id()), } if v, ok := d.GetOk("delete_volume_options"); ok && len(v.([]interface{})) > 0 { - input.OpenZFSConfiguration = &fsx.DeleteVolumeOpenZFSConfiguration{ - Options: flex.ExpandStringList(v.([]interface{})), + input.OpenZFSConfiguration = &awstypes.DeleteVolumeOpenZFSConfiguration{ + Options: flex.ExpandStringyValueList[awstypes.DeleteOpenZFSVolumeOption](v.([]interface{})), } } log.Printf("[DEBUG] Deleting FSx for OpenZFS Volume: %s", d.Id()) - _, err := conn.DeleteVolumeWithContext(ctx, input) + _, err := conn.DeleteVolume(ctx, input) - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) { + if errs.IsA[*awstypes.VolumeNotFound](err) { return diags } @@ -415,56 +418,56 @@ func resourceOpenZFSVolumeDelete(ctx context.Context, d *schema.ResourceData, me return diags } -func expandOpenZFSUserOrGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { - quotas := []*fsx.OpenZFSUserOrGroupQuota{} +func expandOpenZFSUserOrGroupQuotas(cfg []interface{}) []awstypes.OpenZFSUserOrGroupQuota { + quotas := []awstypes.OpenZFSUserOrGroupQuota{} for _, quota := range cfg { expandedQuota := expandOpenZFSUserOrGroupQuota(quota.(map[string]interface{})) if expandedQuota != nil { - quotas = append(quotas, expandedQuota) + quotas = append(quotas, *expandedQuota) } } return quotas } -func expandOpenZFSUserOrGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { +func expandOpenZFSUserOrGroupQuota(conf map[string]interface{}) *awstypes.OpenZFSUserOrGroupQuota { if len(conf) < 1 { return nil } - out := fsx.OpenZFSUserOrGroupQuota{} + out := awstypes.OpenZFSUserOrGroupQuota{} if v, ok := conf[names.AttrID].(int); ok { - out.Id = aws.Int64(int64(v)) + out.Id = aws.Int32(int32(v)) } if v, ok := conf["storage_capacity_quota_gib"].(int); ok { - out.StorageCapacityQuotaGiB = aws.Int64(int64(v)) + out.StorageCapacityQuotaGiB = aws.Int32(int32(v)) } if v, ok := conf[names.AttrType].(string); ok { - out.Type = aws.String(v) + out.Type = awstypes.OpenZFSQuotaType(v) } return &out } -func expandOpenZFSNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name - exports := []*fsx.OpenZFSNfsExport{} +func expandOpenZFSNfsExports(cfg []interface{}) []awstypes.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name + exports := []awstypes.OpenZFSNfsExport{} for _, export := range cfg { expandedExport := expandOpenZFSNfsExport(export.(map[string]interface{})) if expandedExport != nil { - exports = append(exports, expandedExport) + exports = append(exports, *expandedExport) } } return exports } -func expandOpenZFSNfsExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name - out := fsx.OpenZFSNfsExport{} +func expandOpenZFSNfsExport(cfg map[string]interface{}) *awstypes.OpenZFSNfsExport { // nosemgrep:ci.caps4-in-func-name + out := awstypes.OpenZFSNfsExport{} if v, ok := cfg["client_configurations"]; ok { out.ClientConfigurations = expandOpenZFSClientConfigurations(v.(*schema.Set).List()) @@ -473,44 +476,44 @@ func expandOpenZFSNfsExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { return &out } -func expandOpenZFSClientConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { - configurations := []*fsx.OpenZFSClientConfiguration{} +func expandOpenZFSClientConfigurations(cfg []interface{}) []awstypes.OpenZFSClientConfiguration { + configurations := []awstypes.OpenZFSClientConfiguration{} for _, configuration := range 
cfg { expandedConfiguration := expandOpenZFSClientConfiguration(configuration.(map[string]interface{})) if expandedConfiguration != nil { - configurations = append(configurations, expandedConfiguration) + configurations = append(configurations, *expandedConfiguration) } } return configurations } -func expandOpenZFSClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { - out := fsx.OpenZFSClientConfiguration{} +func expandOpenZFSClientConfiguration(conf map[string]interface{}) *awstypes.OpenZFSClientConfiguration { + out := awstypes.OpenZFSClientConfiguration{} if v, ok := conf["clients"].(string); ok && len(v) > 0 { out.Clients = aws.String(v) } if v, ok := conf["options"].([]interface{}); ok { - out.Options = flex.ExpandStringList(v) + out.Options = flex.ExpandStringValueList(v) } return &out } -func expandCreateOpenZFSOriginSnapshotConfiguration(cfg []interface{}) *fsx.CreateOpenZFSOriginSnapshotConfiguration { +func expandCreateOpenZFSOriginSnapshotConfiguration(cfg []interface{}) *awstypes.CreateOpenZFSOriginSnapshotConfiguration { if len(cfg) < 1 { return nil } conf := cfg[0].(map[string]interface{}) - out := fsx.CreateOpenZFSOriginSnapshotConfiguration{} + out := awstypes.CreateOpenZFSOriginSnapshotConfiguration{} if v, ok := conf["copy_strategy"].(string); ok { - out.CopyStrategy = aws.String(v) + out.CopyStrategy = awstypes.OpenZFSCopyStrategy(v) } if v, ok := conf["snapshot_arn"].(string); ok { @@ -520,15 +523,13 @@ func expandCreateOpenZFSOriginSnapshotConfiguration(cfg []interface{}) *fsx.Crea return &out } -func flattenOpenZFSNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { // nosemgrep:ci.caps4-in-func-name +func flattenOpenZFSNfsExports(rs []awstypes.OpenZFSNfsExport) []map[string]interface{} { // nosemgrep:ci.caps4-in-func-name exports := make([]map[string]interface{}, 0) for _, export := range rs { - if export != nil { - cfg := make(map[string]interface{}) - cfg["client_configurations"] = flattenOpenZFSClientConfigurations(export.ClientConfigurations) - exports = append(exports, cfg) - } + cfg := make(map[string]interface{}) + cfg["client_configurations"] = flattenOpenZFSClientConfigurations(export.ClientConfigurations) + exports = append(exports, cfg) } if len(exports) > 0 { @@ -538,16 +539,14 @@ func flattenOpenZFSNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface return nil } -func flattenOpenZFSClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { +func flattenOpenZFSClientConfigurations(rs []awstypes.OpenZFSClientConfiguration) []map[string]interface{} { configurations := make([]map[string]interface{}, 0) for _, configuration := range rs { - if configuration != nil { - cfg := make(map[string]interface{}) - cfg["clients"] = aws.StringValue(configuration.Clients) - cfg["options"] = flex.FlattenStringList(configuration.Options) - configurations = append(configurations, cfg) - } + cfg := make(map[string]interface{}) + cfg["clients"] = aws.ToString(configuration.Clients) + cfg["options"] = flex.FlattenStringValueList(configuration.Options) + configurations = append(configurations, cfg) } if len(configurations) > 0 { @@ -557,17 +556,15 @@ func flattenOpenZFSClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) [] return nil } -func flattenOpenZFSUserOrGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { +func flattenOpenZFSUserOrGroupQuotas(rs []awstypes.OpenZFSUserOrGroupQuota) []map[string]interface{} { quotas := make([]map[string]interface{}, 0) for _, quota 
:= range rs { - if quota != nil { - cfg := make(map[string]interface{}) - cfg[names.AttrID] = aws.Int64Value(quota.Id) - cfg["storage_capacity_quota_gib"] = aws.Int64Value(quota.StorageCapacityQuotaGiB) - cfg[names.AttrType] = aws.StringValue(quota.Type) - quotas = append(quotas, cfg) - } + cfg := make(map[string]interface{}) + cfg[names.AttrID] = aws.ToInt32(quota.Id) + cfg["storage_capacity_quota_gib"] = aws.ToInt32(quota.StorageCapacityQuotaGiB) + cfg[names.AttrType] = string(quota.Type) + quotas = append(quotas, cfg) } if len(quotas) > 0 { @@ -577,24 +574,22 @@ func flattenOpenZFSUserOrGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[st return nil } -func flattenOpenZFSOriginSnapshotConfiguration(rs *fsx.OpenZFSOriginSnapshotConfiguration) []interface{} { +func flattenOpenZFSOriginSnapshotConfiguration(rs *awstypes.OpenZFSOriginSnapshotConfiguration) []interface{} { if rs == nil { return []interface{}{} } m := make(map[string]interface{}) - if rs.CopyStrategy != nil { - m["copy_strategy"] = aws.StringValue(rs.CopyStrategy) - } + m["copy_strategy"] = string(rs.CopyStrategy) if rs.SnapshotARN != nil { - m["snapshot_arn"] = aws.StringValue(rs.SnapshotARN) + m["snapshot_arn"] = aws.ToString(rs.SnapshotARN) } return []interface{}{m} } -func findOpenZFSVolumeByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.Volume, error) { - output, err := findVolumeByIDAndType(ctx, conn, id, fsx.VolumeTypeOpenzfs) +func findOpenZFSVolumeByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.Volume, error) { + output, err := findVolumeByIDAndType(ctx, conn, id, awstypes.VolumeTypeOpenzfs) if err != nil { return nil, err diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go index e8d59976a42e..579708fd0d25 100644 --- a/internal/service/fsx/openzfs_volume_test.go +++ b/internal/service/fsx/openzfs_volume_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,12 +23,12 @@ import ( func TestAccFSxOpenZFSVolume_basic(t *testing.T) { ctx := acctest.Context(t) - var volume fsx.Volume + var volume awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -64,12 +64,12 @@ func TestAccFSxOpenZFSVolume_basic(t *testing.T) { func TestAccFSxOpenZFSVolume_disappears(t *testing.T) { ctx := acctest.Context(t) - var volume fsx.Volume + var volume awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -88,14 +88,14 @@ func TestAccFSxOpenZFSVolume_disappears(t *testing.T) { func TestAccFSxOpenZFSVolume_parentVolume(t *testing.T) { ctx := acctest.Context(t) - var volume, volume2 fsx.Volume + var volume, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" resourceName2 := "aws_fsx_openzfs_volume.test2" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -121,12 +121,12 @@ func TestAccFSxOpenZFSVolume_parentVolume(t *testing.T) { func TestAccFSxOpenZFSVolume_tags(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2, volume3 fsx.Volume + var volume1, volume2, volume3 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -169,12 +169,12 @@ func TestAccFSxOpenZFSVolume_tags(t *testing.T) { func TestAccFSxOpenZFSVolume_copyTags(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -216,13 +216,13 @@ func TestAccFSxOpenZFSVolume_copyTags(t *testing.T) { func TestAccFSxOpenZFSVolume_name(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -253,12 +253,12 @@ func TestAccFSxOpenZFSVolume_name(t *testing.T) { func 
TestAccFSxOpenZFSVolume_dataCompressionType(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -289,12 +289,12 @@ func TestAccFSxOpenZFSVolume_dataCompressionType(t *testing.T) { func TestAccFSxOpenZFSVolume_readOnly(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -325,12 +325,12 @@ func TestAccFSxOpenZFSVolume_readOnly(t *testing.T) { func TestAccFSxOpenZFSVolume_recordSizeKib(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -361,12 +361,12 @@ func TestAccFSxOpenZFSVolume_recordSizeKib(t *testing.T) { func TestAccFSxOpenZFSVolume_storageCapacity(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -399,12 +399,12 @@ func TestAccFSxOpenZFSVolume_storageCapacity(t *testing.T) { func TestAccFSxOpenZFSVolume_nfsExports(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, 
t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -451,12 +451,12 @@ func TestAccFSxOpenZFSVolume_nfsExports(t *testing.T) { func TestAccFSxOpenZFSVolume_userAndGroupQuotas(t *testing.T) { ctx := acctest.Context(t) - var volume1, volume2 fsx.Volume + var volume1, volume2 awstypes.Volume resourceName := "aws_fsx_openzfs_volume.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckOpenZFSVolumeDestroy(ctx), @@ -510,14 +510,14 @@ func TestAccFSxOpenZFSVolume_userAndGroupQuotas(t *testing.T) { }) } -func testAccCheckOpenZFSVolumeExists(ctx context.Context, n string, v *fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeExists(ctx context.Context, n string, v *awstypes.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) output, err := tffsx.FindOpenZFSVolumeByID(ctx, conn, rs.Primary.ID) @@ -533,7 +533,7 @@ func testAccCheckOpenZFSVolumeExists(ctx context.Context, n string, v *fsx.Volum func testAccCheckOpenZFSVolumeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_fsx_openzfs_volume" { @@ -556,20 +556,20 @@ func testAccCheckOpenZFSVolumeDestroy(ctx context.Context) resource.TestCheckFun } } -func testAccCheckOpenZFSVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeNotRecreated(i, j *awstypes.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.VolumeId) != aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx for OpenZFS Volume (%s) recreated", aws.StringValue(i.VolumeId)) + if aws.ToString(i.VolumeId) != aws.ToString(j.VolumeId) { + return fmt.Errorf("FSx for OpenZFS Volume (%s) recreated", aws.ToString(i.VolumeId)) } return nil } } -func testAccCheckOpenZFSVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { +func testAccCheckOpenZFSVolumeRecreated(i, j *awstypes.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.VolumeId) == aws.StringValue(j.VolumeId) { - return fmt.Errorf("FSx for OpenZFS Volume (%s) not recreated", aws.StringValue(i.VolumeId)) + if aws.ToString(i.VolumeId) == aws.ToString(j.VolumeId) { + return fmt.Errorf("FSx for OpenZFS Volume (%s) not recreated", aws.ToString(i.VolumeId)) } return nil diff --git a/internal/service/fsx/service_endpoint_resolver_gen.go b/internal/service/fsx/service_endpoint_resolver_gen.go index 5b3ebdb7ac25..43a93b544c20 100644 --- a/internal/service/fsx/service_endpoint_resolver_gen.go +++ 
b/internal/service/fsx/service_endpoint_resolver_gen.go @@ -6,65 +6,63 @@ import ( "context" "fmt" "net" - "net/url" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + fsx_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fsx" + smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -var _ endpoints_sdkv1.Resolver = resolverSDKv1{} +var _ fsx_sdkv2.EndpointResolverV2 = resolverSDKv2{} -type resolverSDKv1 struct { - ctx context.Context +type resolverSDKv2 struct { + defaultResolver fsx_sdkv2.EndpointResolverV2 } -func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { - return resolverSDKv1{ - ctx: ctx, +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: fsx_sdkv2.NewDefaultEndpointResolverV2(), } } -func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { - ctx := r.ctx +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params fsx_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) - var opt endpoints_sdkv1.Options - opt.Set(opts...) - - useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + if endpoint := aws_sdkv2.ToString(params.Endpoint); endpoint != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) - defaultResolver := endpoints_sdkv1.DefaultResolver() + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } - if useFIPS { + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) - endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) if err != nil { return endpoint, err } tflog.Debug(ctx, "endpoint resolved", map[string]any{ - "tf_aws.endpoint": endpoint.URL, + "tf_aws.endpoint": endpoint.URI.String(), }) - var endpointURL *url.URL - endpointURL, err = url.Parse(endpoint.URL) - if err != nil { - return endpoint, err - } - - hostname := endpointURL.Hostname() + hostname := endpoint.URI.Hostname() _, err = net.LookupHost(hostname) if err != nil { if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ "tf_aws.hostname": hostname, }) - opts = append(opts, func(o *endpoints_sdkv1.Options) { - o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - }) + params.UseFIPS = aws_sdkv2.Bool(false) } else { - err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + err = fmt.Errorf("looking up fsx endpoint %q: %s", hostname, err) return } } else { @@ -72,5 +70,13 @@ func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoin } } - return defaultResolver.EndpointFor(service, region, opts...)
+ return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*fsx_sdkv2.Options) { + return func(o *fsx_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } } diff --git a/internal/service/fsx/service_endpoints_gen_test.go b/internal/service/fsx/service_endpoints_gen_test.go index 6ca13f8d7b94..ff2d965b7431 100644 --- a/internal/service/fsx/service_endpoints_gen_test.go +++ b/internal/service/fsx/service_endpoints_gen_test.go @@ -4,18 +4,22 @@ package fsx_test import ( "context" + "errors" "fmt" "maps" "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - fsx_sdkv1 "github.com/aws/aws-sdk-go/service/fsx" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + fsx_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fsx" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -240,54 +244,63 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } func defaultEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := fsx_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(fsx_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), fsx_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func defaultFIPSEndpoint(region string) (url.URL, error) { - r := endpoints.DefaultResolver() + r := fsx_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(fsx_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), fsx_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return *url, nil + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.FSxConn(ctx) + client := meta.FSxClient(ctx) - req, _ := client.DescribeFileSystemsRequest(&fsx_sdkv1.DescribeFileSystemsInput{}) + var result apiCallParams - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.DescribeFileSystems(ctx, &fsx_sdkv2.DescribeFileSystemsInput{}, + func(opts *fsx_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -466,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func 
addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/fsx/service_package_gen.go b/internal/service/fsx/service_package_gen.go index 4b4ad114d644..ddd11413804a 100644 --- a/internal/service/fsx/service_package_gen.go +++ b/internal/service/fsx/service_package_gen.go @@ -5,10 +5,8 @@ package fsx import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - fsx_sdkv1 "github.com/aws/aws-sdk-go/service/fsx" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + fsx_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fsx" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -36,7 +34,6 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) 
[]*types.ServicePac Factory: dataSourceONTAPStorageVirtualMachine, TypeName: "aws_fsx_ontap_storage_virtual_machine", Name: "ONTAP Storage Virtual Machine", - Tags: &types.ServicePackageResourceTags{}, }, { Factory: dataSourceONTAPStorageVirtualMachines, @@ -47,7 +44,6 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac Factory: dataSourceOpenzfsSnapshot, TypeName: "aws_fsx_openzfs_snapshot", Name: "OpenZFS Snapshot", - Tags: &types.ServicePackageResourceTags{}, }, { Factory: dataSourceWindowsFileSystem, @@ -154,22 +150,14 @@ func (p *servicePackage) ServicePackageName() string { return names.FSx } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*fsx_sdkv1.FSx, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - } else { - cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) - } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*fsx_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return fsx_sdkv1.New(sess.Copy(&cfg)), nil + return fsx_sdkv2.NewFromConfig(cfg, + fsx_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 488be4ae0f78..07e5443cbce3 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -7,12 +7,13 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -87,36 +88,34 @@ func sweepBackups(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.FSxConn(ctx) + conn := client.FSxClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &fsx.DescribeBackupsInput{} - err = conn.DescribeBackupsPagesWithContext(ctx, input, func(page *fsx.DescribeBackupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := fsx.NewDescribeBackupsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx Backups for %s: %w", region, err)) + break } for _, fs := range page.Backups { r := resourceBackup() d := r.Data(nil) - d.SetId(aws.StringValue(fs.BackupId)) + d.SetId(aws.ToString(fs.BackupId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing FSx Backups for %s: %w", region, err)) - } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx Backups for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping FSx Backups sweep for %s: %s", region, errs) return nil }
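For reference, the v2 pagination pattern these sweepers migrate to replaces the v1 `DescribeBackupsPagesWithContext` callback with an explicit paginator loop. A minimal, self-contained sketch (illustrative only, not part of this change; the client setup and printed field are assumptions):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	client := fsx.NewFromConfig(cfg)

	// Each NextPage call issues one DescribeBackups request; the paginator
	// tracks NextToken internally, so the v1 lastPage bookkeeping and the
	// nil-page guard are no longer needed.
	pages := fsx.NewDescribeBackupsPaginator(client, &fsx.DescribeBackupsInput{})
	for pages.HasMorePages() {
		page, err := pages.NextPage(ctx)
		if err != nil {
			log.Fatal(err) // page is nil on error, so stop before dereferencing it
		}

		for _, b := range page.Backups {
			fmt.Println(aws.ToString(b.BackupId))
		}
	}
}
```

Because `NextPage` returns a nil page alongside an error, the sweepers above break out of the loop on a listing error and run the sweep orchestrator only after pagination finishes.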
@@ -132,40 +131,38 @@ func sweepLustreFileSystems(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.FSxConn(ctx) + conn := client.FSxClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &fsx.DescribeFileSystemsInput{} - err = conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx Lustre File Systems for %s: %w", region, err)) + break } for _, fs := range page.FileSystems { - if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeLustre { + if fs.FileSystemType != awstypes.FileSystemTypeLustre { continue } r := resourceLustreFileSystem() d := r.Data(nil) - d.SetId(aws.StringValue(fs.FileSystemId)) + d.SetId(aws.ToString(fs.FileSystemId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing FSx Lustre File Systems for %s: %w", region, err)) - } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx Lustre File Systems for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping FSx Lustre File System sweep for %s: %s", region, errs) return nil }
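A side effect of the migration visible in the hunk above: v2 model enums such as `FileSystemType` are named string types rather than `*string`, so filters compare values directly and the constants move from the `fsx` package to the `types` package. A minimal sketch (the `isLustre` helper is hypothetical, for illustration only):

```go
package main

import (
	"fmt"

	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

// isLustre is a hypothetical helper; in v2 no nil check or
// aws.StringValue dereference is needed to compare an enum field.
func isLustre(fs awstypes.FileSystem) bool {
	return fs.FileSystemType == awstypes.FileSystemTypeLustre
}

func main() {
	fs := awstypes.FileSystem{FileSystemType: awstypes.FileSystemTypeLustre}
	fmt.Println(isLustre(fs), string(fs.FileSystemType)) // true LUSTRE
}
```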
@@ -181,40 +178,38 @@ func sweepONTAPFileSystems(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.FSxConn(ctx) + conn := client.FSxClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &fsx.DescribeFileSystemsInput{} - err = conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx ONTAP File Systems for %s: %w", region, err)) + break } for _, fs := range page.FileSystems { - if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeOntap { + if fs.FileSystemType != awstypes.FileSystemTypeOntap { continue } r := resourceONTAPFileSystem() d := r.Data(nil) - d.SetId(aws.StringValue(fs.FileSystemId)) + d.SetId(aws.ToString(fs.FileSystemId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing FSx ONTAP File Systems for %s: %w", region, err)) - } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx ONTAP File Systems for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping FSx ONTAP File System sweep for %s: %s", region, errs) return nil } @@ -230,36 +225,34 @@ func sweepONTAPStorageVirtualMachine(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.FSxConn(ctx) + conn := client.FSxClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &fsx.DescribeStorageVirtualMachinesInput{} - err = conn.DescribeStorageVirtualMachinesPagesWithContext(ctx, input, func(page *fsx.DescribeStorageVirtualMachinesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := fsx.NewDescribeStorageVirtualMachinesPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx ONTAP Storage Virtual Machine for %s: %w", region, err)) + break } for _, vm := range page.StorageVirtualMachines { r := resourceONTAPStorageVirtualMachine() d := r.Data(nil) - d.SetId(aws.StringValue(vm.StorageVirtualMachineId)) + d.SetId(aws.ToString(vm.StorageVirtualMachineId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing FSx ONTAP Storage Virtual Machine for %s: %w", region, err)) - } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx ONTAP Storage Virtual Machine for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping FSx ONTAP Storage Virtual Machine sweep for %s: %s", region, errs) return nil }
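The mechanical renames running through the rest of this diff follow the standard v1-to-v2 pointer-helper mapping: `aws.StringValue` becomes `aws.ToString`, `aws.BoolValue` becomes `aws.ToBool`, and fields the v2 FSx model narrows to `int32` (such as `StorageCapacity` and `ThroughputCapacity`) take `aws.Int32` instead of `aws.Int64`. A compilable sketch of the mapping (all values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	id := aws.String("fs-0123456789abcdef0") // *string, unchanged from v1
	fmt.Println(aws.ToString(id))            // v1: aws.StringValue(id); yields "" when nil

	root := aws.Bool(true)
	fmt.Println(aws.ToBool(root)) // v1: aws.BoolValue(root); yields false when nil

	capacity := aws.Int32(300)         // v1 inputs used aws.Int64 for these fields
	fmt.Println(aws.ToInt32(capacity)) // safe dereference of *int32
}
```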
@@ -275,45 +268,43 @@ func sweepONTAPVolumes(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.FSxConn(ctx) + conn := client.FSxClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &fsx.DescribeVolumesInput{} - err = conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := fsx.NewDescribeVolumesPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx ONTAP Volume for %s: %w", region, err)) + break } for _, v := range page.Volumes { - if aws.StringValue(v.VolumeType) != fsx.VolumeTypeOntap { + if v.VolumeType != awstypes.VolumeTypeOntap { continue } - if v.OntapConfiguration != nil && aws.BoolValue(v.OntapConfiguration.StorageVirtualMachineRoot) { + if v.OntapConfiguration != nil && aws.ToBool(v.OntapConfiguration.StorageVirtualMachineRoot) { continue } r := resourceONTAPVolume() d := r.Data(nil) - d.SetId(aws.StringValue(v.VolumeId)) + d.SetId(aws.ToString(v.VolumeId)) d.Set("bypass_snaplock_enterprise_retention", true) d.Set("skip_final_backup", true) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing FSx ONTAP Volume for %s: %w", region, err)) - } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx ONTAP Volume for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping FSx ONTAP Volume sweep for %s: %s", region, errs) return nil } @@ -329,40 +320,38 @@ func sweepOpenZFSFileSystems(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.FSxConn(ctx) + conn := client.FSxClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &fsx.DescribeFileSystemsInput{} - err = conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx OpenZFS File Systems for %s: %w", region, err)) + break } for _, fs := range page.FileSystems { - if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeOpenzfs { + if fs.FileSystemType != awstypes.FileSystemTypeOpenzfs { continue } r := resourceOpenZFSFileSystem() d := r.Data(nil) - d.SetId(aws.StringValue(fs.FileSystemId)) + d.SetId(aws.ToString(fs.FileSystemId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing FSx OpenZFS File Systems for %s: %w", region, err)) - } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx OpenZFS File Systems for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping FSx OpenZFS File System sweep for %s: %s", region, errs) return nil }
@@ -378,43 +367,41 @@ func sweepOpenZFSVolume(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.FSxConn(ctx) + conn := client.FSxClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &fsx.DescribeVolumesInput{} - err = conn.DescribeVolumesPagesWithContext(ctx, input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := fsx.NewDescribeVolumesPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx OpenZFS Volume for %s: %w", region, err)) + break } for _, v := range page.Volumes { - if aws.StringValue(v.VolumeType) != fsx.VolumeTypeOpenzfs { + if v.VolumeType != awstypes.VolumeTypeOpenzfs { continue } - if v.OpenZFSConfiguration != nil && aws.StringValue(v.OpenZFSConfiguration.ParentVolumeId) == "" { + if v.OpenZFSConfiguration != nil && aws.ToString(v.OpenZFSConfiguration.ParentVolumeId) == "" { continue } r := resourceOpenZFSVolume() d := r.Data(nil) - d.SetId(aws.StringValue(v.VolumeId)) + d.SetId(aws.ToString(v.VolumeId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing FSx OpenZFS Volume for %s: %w", region, err)) - } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx OpenZFS Volume for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping FSx OpenZFS Volume sweep for %s: %s", region, errs) return nil } @@ -430,41 +417,39 @@ func sweepWindowsFileSystems(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.FSxConn(ctx) + conn := client.FSxClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &fsx.DescribeFileSystemsInput{} - err = conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := fsx.NewDescribeFileSystemsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx Windows File Systems for %s: %w", region, err)) + break } for _, fs := range page.FileSystems { - if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeWindows { + if fs.FileSystemType != awstypes.FileSystemTypeWindows { continue } r := resourceWindowsFileSystem() d := r.Data(nil) - d.SetId(aws.StringValue(fs.FileSystemId)) + d.SetId(aws.ToString(fs.FileSystemId)) d.Set("skip_final_backup", true) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing FSx Windows File Systems for %s: %w", region, err)) - } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx Windows File Systems for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping FSx Windows File System sweep for %s: %s", region, errs) return nil } diff --git a/internal/service/fsx/tags_gen.go b/internal/service/fsx/tags_gen.go index cdd98c088a78..95fb0b6aecee 100644 --- a/internal/service/fsx/tags_gen.go +++ b/internal/service/fsx/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" - "github.com/aws/aws-sdk-go/service/fsx/fsxiface" + "github.com/aws/aws-sdk-go-v2/aws" +
"github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists fsx service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn fsxiface.FSxAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *fsx.Client, identifier string, optFns ...func(*fsx.Options)) (tftags.KeyValueTags, error) { input := &fsx.ListTagsForResourceInput{ ResourceARN: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn fsxiface.FSxAPI, identifier string) (tft // ListTags lists fsx service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).FSxConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).FSxClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns fsx service tags. -func Tags(tags tftags.KeyValueTags) []*fsx.Tag { - result := make([]*fsx.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &fsx.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*fsx.Tag { } // KeyValueTags creates tftags.KeyValueTags from fsx service tags. -func KeyValueTags(ctx context.Context, tags []*fsx.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*fsx.Tag) tftags.KeyValueTags { // getTagsIn returns fsx service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*fsx.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*fsx.Tag { } // setTagsOut sets fsx service tags in Context. -func setTagsOut(ctx context.Context, tags []*fsx.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*fsx.Tag) { // updateTags updates fsx service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn fsxiface.FSxAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *fsx.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*fsx.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn fsxiface.FSxAPI, identifier string, ol if len(removedTags) > 0 { input := &fsx.UntagResourceInput{ ResourceARN: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn fsxiface.FSxAPI, identifier string, ol Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn fsxiface.FSxAPI, identifier string, ol // UpdateTags updates fsx service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).FSxConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).FSxClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/fsx/windows_file_system.go b/internal/service/fsx/windows_file_system.go index 943f31801aba..aee92b355901 100644 --- a/internal/service/fsx/windows_file_system.go +++ b/internal/service/fsx/windows_file_system.go @@ -11,15 +11,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/fsx" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/fsx" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -92,16 +94,16 @@ func resourceWindowsFileSystem() *schema.Resource { }, }, "file_access_audit_log_level": { - Type: schema.TypeString, - Optional: true, - Default: fsx.WindowsAccessAuditLogLevelDisabled, - ValidateFunc: validation.StringInSlice(fsx.WindowsAccessAuditLogLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.WindowsAccessAuditLogLevelDisabled, + ValidateDiagFunc: enum.Validate[awstypes.WindowsAccessAuditLogLevel](), }, "file_share_access_audit_log_level": { - Type: schema.TypeString, - Optional: true, - Default: fsx.WindowsAccessAuditLogLevelDisabled, - ValidateFunc: 
validation.StringInSlice(fsx.WindowsAccessAuditLogLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.WindowsAccessAuditLogLevelDisabled, + ValidateDiagFunc: enum.Validate[awstypes.WindowsAccessAuditLogLevel](), }, }, }, @@ -133,11 +135,11 @@ func resourceWindowsFileSystem() *schema.Resource { ), }, "deployment_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: fsx.WindowsDeploymentTypeSingleAz1, - ValidateFunc: validation.StringInSlice(fsx.WindowsDeploymentType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.WindowsDeploymentTypeSingleAz1, + ValidateDiagFunc: enum.Validate[awstypes.WindowsDeploymentType](), }, "disk_iops_configuration": { Type: schema.TypeList, @@ -153,10 +155,10 @@ func resourceWindowsFileSystem() *schema.Resource { ValidateFunc: validation.IntBetween(0, 350000), }, names.AttrMode: { - Type: schema.TypeString, - Optional: true, - Default: fsx.DiskIopsConfigurationModeAutomatic, - ValidateFunc: validation.StringInSlice(fsx.DiskIopsConfigurationMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DiskIopsConfigurationModeAutomatic, + ValidateDiagFunc: enum.Validate[awstypes.DiskIopsConfigurationMode](), }, }, }, @@ -261,11 +263,11 @@ func resourceWindowsFileSystem() *schema.Resource { ValidateFunc: validation.IntBetween(32, 65536), }, names.AttrStorageType: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: fsx.StorageTypeSsd, - ValidateFunc: validation.StringInSlice(fsx.StorageType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.StorageTypeSsd, + ValidateDiagFunc: enum.Validate[awstypes.StorageType](), }, names.AttrSubnetIDs: { Type: schema.TypeList, @@ -302,28 +304,28 @@ func resourceWindowsFileSystem() *schema.Resource { func resourceWindowsFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) inputC := &fsx.CreateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), - FileSystemType: aws.String(fsx.FileSystemTypeWindows), - StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), - SubnetIds: flex.ExpandStringList(d.Get(names.AttrSubnetIDs).([]interface{})), + FileSystemType: awstypes.FileSystemTypeWindows, + StorageCapacity: aws.Int32(int32(d.Get("storage_capacity").(int))), + SubnetIds: flex.ExpandStringValueList(d.Get(names.AttrSubnetIDs).([]interface{})), Tags: getTagsIn(ctx), - WindowsConfiguration: &fsx.CreateFileSystemWindowsConfiguration{ - AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), + WindowsConfiguration: &awstypes.CreateFileSystemWindowsConfiguration{ + AutomaticBackupRetentionDays: aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))), CopyTagsToBackups: aws.Bool(d.Get("copy_tags_to_backups").(bool)), - ThroughputCapacity: aws.Int64(int64(d.Get("throughput_capacity").(int))), + ThroughputCapacity: aws.Int32(int32(d.Get("throughput_capacity").(int))), }, } inputB := &fsx.CreateFileSystemFromBackupInput{ ClientRequestToken: aws.String(id.UniqueId()), - SubnetIds: flex.ExpandStringList(d.Get(names.AttrSubnetIDs).([]interface{})), + SubnetIds: flex.ExpandStringValueList(d.Get(names.AttrSubnetIDs).([]interface{})), Tags: getTagsIn(ctx), - WindowsConfiguration: 
&fsx.CreateFileSystemWindowsConfiguration{ - AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), + WindowsConfiguration: &awstypes.CreateFileSystemWindowsConfiguration{ + AutomaticBackupRetentionDays: aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))), CopyTagsToBackups: aws.Bool(d.Get("copy_tags_to_backups").(bool)), - ThroughputCapacity: aws.Int64(int64(d.Get("throughput_capacity").(int))), + ThroughputCapacity: aws.Int32(int32(d.Get("throughput_capacity").(int))), }, } @@ -333,8 +335,8 @@ func resourceWindowsFileSystemCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk("aliases"); ok { - inputC.WindowsConfiguration.Aliases = flex.ExpandStringSet(v.(*schema.Set)) - inputB.WindowsConfiguration.Aliases = flex.ExpandStringSet(v.(*schema.Set)) + inputC.WindowsConfiguration.Aliases = flex.ExpandStringValueSet(v.(*schema.Set)) + inputB.WindowsConfiguration.Aliases = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("audit_log_configuration"); ok && len(v.([]interface{})) > 0 { @@ -353,8 +355,8 @@ func resourceWindowsFileSystemCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk("deployment_type"); ok { - inputC.WindowsConfiguration.DeploymentType = aws.String(v.(string)) - inputB.WindowsConfiguration.DeploymentType = aws.String(v.(string)) + inputC.WindowsConfiguration.DeploymentType = awstypes.WindowsDeploymentType(v.(string)) + inputB.WindowsConfiguration.DeploymentType = awstypes.WindowsDeploymentType(v.(string)) } if v, ok := d.GetOk(names.AttrKMSKeyID); ok { @@ -368,8 +370,8 @@ func resourceWindowsFileSystemCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk(names.AttrSecurityGroupIDs); ok { - inputC.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) - inputB.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + inputC.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) + inputB.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("self_managed_active_directory"); ok { @@ -378,8 +380,8 @@ func resourceWindowsFileSystemCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk(names.AttrStorageType); ok { - inputC.StorageType = aws.String(v.(string)) - inputB.StorageType = aws.String(v.(string)) + inputC.StorageType = awstypes.StorageType(v.(string)) + inputB.StorageType = awstypes.StorageType(v.(string)) } if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { @@ -391,21 +393,21 @@ func resourceWindowsFileSystemCreate(ctx context.Context, d *schema.ResourceData backupID := v.(string) inputB.BackupId = aws.String(backupID) - output, err := conn.CreateFileSystemFromBackupWithContext(ctx, inputB) + output, err := conn.CreateFileSystemFromBackup(ctx, inputB) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx for Windows File Server File System from backup (%s): %s", backupID, err) } - d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) + d.SetId(aws.ToString(output.FileSystem.FileSystemId)) } else { - output, err := conn.CreateFileSystemWithContext(ctx, inputC) + output, err := conn.CreateFileSystem(ctx, inputC) if err != nil { return sdkdiag.AppendErrorf(diags, "creating FSx for Windows File Server File System: %s", err) } - d.SetId(aws.StringValue(output.FileSystem.FileSystemId)) + d.SetId(aws.ToString(output.FileSystem.FileSystemId)) } if _, err := waitFileSystemCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -417,7 +419,7 @@ func 
resourceWindowsFileSystemCreate(ctx context.Context, d *schema.ResourceData func resourceWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) filesystem, err := findWindowsFileSystemByID(ctx, conn, d.Id()) @@ -434,7 +436,7 @@ func resourceWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData, windowsConfig := filesystem.WindowsConfiguration d.Set("active_directory_id", windowsConfig.ActiveDirectoryId) - d.Set("aliases", aws.StringValueSlice(expandAliasValues(windowsConfig.Aliases))) + d.Set("aliases", expandAliasValues(windowsConfig.Aliases)) d.Set(names.AttrARN, filesystem.ResourceARN) if err := d.Set("audit_log_configuration", flattenWindowsAuditLogConfiguration(windowsConfig.AuditLogConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting audit_log_configuration: %s", err) @@ -448,7 +450,7 @@ func resourceWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData, } d.Set(names.AttrDNSName, filesystem.DNSName) d.Set(names.AttrKMSKeyID, filesystem.KmsKeyId) - d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)) + d.Set("network_interface_ids", filesystem.NetworkInterfaceIds) d.Set(names.AttrOwnerID, filesystem.OwnerId) d.Set("preferred_file_server_ip", windowsConfig.PreferredFileServerIp) d.Set("preferred_subnet_id", windowsConfig.PreferredSubnetId) @@ -458,7 +460,7 @@ func resourceWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData, } d.Set("storage_capacity", filesystem.StorageCapacity) d.Set(names.AttrStorageType, filesystem.StorageType) - d.Set(names.AttrSubnetIDs, aws.StringValueSlice(filesystem.SubnetIds)) + d.Set(names.AttrSubnetIDs, filesystem.SubnetIds) d.Set("throughput_capacity", windowsConfig.ThroughputCapacity) d.Set(names.AttrVPCID, filesystem.VpcId) d.Set("weekly_maintenance_start_time", windowsConfig.WeeklyMaintenanceStartTime) @@ -470,7 +472,7 @@ func resourceWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData, func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) if d.HasChange("aliases") { o, n := d.GetChange("aliases") @@ -479,35 +481,35 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData if len(add) > 0 { input := &fsx.AssociateFileSystemAliasesInput{ - Aliases: aws.StringSlice(add), + Aliases: add, FileSystemId: aws.String(d.Id()), } - _, err := conn.AssociateFileSystemAliasesWithContext(ctx, input) + _, err := conn.AssociateFileSystemAliases(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "associating FSx for Windows File Server File System (%s) aliases: %s", d.Id(), err) } - if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasAssociation, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemAliasAssociation, err) + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeFileSystemAliasAssociation, d.Timeout(schema.TimeoutUpdate)); err != nil { + return 
sdkdiag.AppendErrorf(diags, "waiting for FSx for Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeFileSystemAliasAssociation, err) } } if len(del) > 0 { input := &fsx.DisassociateFileSystemAliasesInput{ - Aliases: aws.StringSlice(del), + Aliases: del, FileSystemId: aws.String(d.Id()), } - _, err := conn.DisassociateFileSystemAliasesWithContext(ctx, input) + _, err := conn.DisassociateFileSystemAliases(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "disassociating FSx for Windows File Server File System (%s) aliases: %s", d.Id(), err) } - if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemAliasDisassociation, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx for Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemAliasDisassociation, err) + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeFileSystemAliasDisassociation, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx for Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeFileSystemAliasDisassociation, err) } } } @@ -520,13 +522,13 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), - WindowsConfiguration: &fsx.UpdateFileSystemWindowsConfiguration{ - ThroughputCapacity: aws.Int64(int64(n)), + WindowsConfiguration: &awstypes.UpdateFileSystemWindowsConfiguration{ + ThroughputCapacity: aws.Int32(int32(n)), }, } startTime := time.Now() - _, err := conn.UpdateFileSystemWithContext(ctx, input) + _, err := conn.UpdateFileSystem(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for Windows File Server File System (%s) ThroughputCapacity: %s", d.Id(), err) @@ -536,8 +538,8 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) update: %s", d.Id(), err) } - if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, err) } } } @@ -552,7 +554,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData input := &fsx.UpdateFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), - WindowsConfiguration: &fsx.UpdateFileSystemWindowsConfiguration{}, + WindowsConfiguration: &awstypes.UpdateFileSystemWindowsConfiguration{}, } if d.HasChange("audit_log_configuration") { @@ -560,7 +562,7 @@ func 
resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("automatic_backup_retention_days") { - input.WindowsConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))) + input.WindowsConfiguration.AutomaticBackupRetentionDays = aws.Int32(int32(d.Get("automatic_backup_retention_days").(int))) } if d.HasChange("daily_automatic_backup_start_time") { @@ -576,11 +578,11 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("storage_capacity") { - input.StorageCapacity = aws.Int64(int64(d.Get("storage_capacity").(int))) + input.StorageCapacity = aws.Int32(int32(d.Get("storage_capacity").(int))) } if d.HasChange("throughput_capacity") { - input.WindowsConfiguration.ThroughputCapacity = aws.Int64(int64(d.Get("throughput_capacity").(int))) + input.WindowsConfiguration.ThroughputCapacity = aws.Int32(int32(d.Get("throughput_capacity").(int))) } if d.HasChange("weekly_maintenance_start_time") { @@ -588,7 +590,7 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData } startTime := time.Now() - _, err := conn.UpdateFileSystemWithContext(ctx, input) + _, err := conn.UpdateFileSystem(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating FSx for Windows File Server File System (%s): %s", d.Id(), err) @@ -598,8 +600,8 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) update: %s", d.Id(), err) } - if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), fsx.AdministrativeActionTypeFileSystemUpdate, err) + if _, err := waitFileSystemAdministrativeActionCompleted(ctx, conn, d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for FSx Windows File Server File System (%s) administrative action (%s) complete: %s", d.Id(), awstypes.AdministrativeActionTypeFileSystemUpdate, err) } } @@ -608,12 +610,12 @@ func resourceWindowsFileSystemUpdate(ctx context.Context, d *schema.ResourceData func resourceWindowsFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) input := &fsx.DeleteFileSystemInput{ ClientRequestToken: aws.String(id.UniqueId()), FileSystemId: aws.String(d.Id()), - WindowsConfiguration: &fsx.DeleteFileSystemWindowsConfiguration{ + WindowsConfiguration: &awstypes.DeleteFileSystemWindowsConfiguration{ SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)), }, } @@ -623,9 +625,9 @@ func resourceWindowsFileSystemDelete(ctx context.Context, d *schema.ResourceData } log.Printf("[DEBUG] Deleting FSx for Windows File Server File System: %s", d.Id()) - _, err := conn.DeleteFileSystemWithContext(ctx, input) + _, err := conn.DeleteFileSystem(ctx, input) - if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { + if errs.IsA[*awstypes.FileSystemNotFound](err) { return diags } @@ -640,7 +642,7 @@ func resourceWindowsFileSystemDelete(ctx context.Context, d *schema.ResourceData return diags } -func 
expandAliasValues(aliases []*fsx.Alias) []*string { +func expandAliasValues(aliases []awstypes.Alias) []*string { var alternateDNSNames []*string for _, alias := range aliases { @@ -651,15 +653,15 @@ func expandAliasValues(aliases []*fsx.Alias) []*string { return alternateDNSNames } -func expandSelfManagedActiveDirectoryConfigurationCreate(l []interface{}) *fsx.SelfManagedActiveDirectoryConfiguration { +func expandSelfManagedActiveDirectoryConfigurationCreate(l []interface{}) *awstypes.SelfManagedActiveDirectoryConfiguration { if len(l) == 0 || l[0] == nil { return nil } data := l[0].(map[string]interface{}) - req := &fsx.SelfManagedActiveDirectoryConfiguration{ + req := &awstypes.SelfManagedActiveDirectoryConfiguration{ DomainName: aws.String(data[names.AttrDomainName].(string)), - DnsIps: flex.ExpandStringSet(data["dns_ips"].(*schema.Set)), + DnsIps: flex.ExpandStringValueSet(data["dns_ips"].(*schema.Set)), Password: aws.String(data[names.AttrPassword].(string)), UserName: aws.String(data[names.AttrUsername].(string)), } @@ -675,16 +677,16 @@ func expandSelfManagedActiveDirectoryConfigurationCreate(l []interface{}) *fsx.S return req } -func expandSelfManagedActiveDirectoryConfigurationUpdate(l []interface{}) *fsx.SelfManagedActiveDirectoryConfigurationUpdates { +func expandSelfManagedActiveDirectoryConfigurationUpdate(l []interface{}) *awstypes.SelfManagedActiveDirectoryConfigurationUpdates { if len(l) == 0 || l[0] == nil { return nil } data := l[0].(map[string]interface{}) - req := &fsx.SelfManagedActiveDirectoryConfigurationUpdates{} + req := &awstypes.SelfManagedActiveDirectoryConfigurationUpdates{} if v, ok := data["dns_ips"].(*schema.Set); ok && v.Len() > 0 { - req.DnsIps = flex.ExpandStringSet(v) + req.DnsIps = flex.ExpandStringValueSet(v) } if v, ok := data[names.AttrPassword].(string); ok && v != "" { @@ -698,7 +700,7 @@ func expandSelfManagedActiveDirectoryConfigurationUpdate(l []interface{}) *fsx.S return req } -func flattenSelfManagedActiveDirectoryConfiguration(d *schema.ResourceData, adopts *fsx.SelfManagedActiveDirectoryAttributes) []map[string]interface{} { +func flattenSelfManagedActiveDirectoryConfiguration(d *schema.ResourceData, adopts *awstypes.SelfManagedActiveDirectoryAttributes) []map[string]interface{} { if adopts == nil { return []map[string]interface{}{} } @@ -710,18 +712,18 @@ func flattenSelfManagedActiveDirectoryConfiguration(d *schema.ResourceData, adop // See also: flattenEmrKerberosAttributes m := map[string]interface{}{ - "dns_ips": aws.StringValueSlice(adopts.DnsIps), - names.AttrDomainName: aws.StringValue(adopts.DomainName), - "file_system_administrators_group": aws.StringValue(adopts.FileSystemAdministratorsGroup), - "organizational_unit_distinguished_name": aws.StringValue(adopts.OrganizationalUnitDistinguishedName), + "dns_ips": adopts.DnsIps, + names.AttrDomainName: aws.ToString(adopts.DomainName), + "file_system_administrators_group": aws.ToString(adopts.FileSystemAdministratorsGroup), + "organizational_unit_distinguished_name": aws.ToString(adopts.OrganizationalUnitDistinguishedName), names.AttrPassword: d.Get("self_managed_active_directory.0.password").(string), - names.AttrUsername: aws.StringValue(adopts.UserName), + names.AttrUsername: aws.ToString(adopts.UserName), } return []map[string]interface{}{m} } -func expandWindowsAuditLogCreateConfiguration(l []interface{}) *fsx.WindowsAuditLogCreateConfiguration { +func expandWindowsAuditLogCreateConfiguration(l []interface{}) *awstypes.WindowsAuditLogCreateConfiguration { if len(l) == 0 || l[0] == 
nil { return nil } @@ -734,13 +736,13 @@ func expandWindowsAuditLogCreateConfiguration(l []interface{}) *fsx.WindowsAudit return nil } - req := &fsx.WindowsAuditLogCreateConfiguration{ - FileAccessAuditLogLevel: aws.String(fileAccessAuditLogLevel), - FileShareAccessAuditLogLevel: aws.String(fileShareAccessAuditLogLevel), + req := &awstypes.WindowsAuditLogCreateConfiguration{ + FileAccessAuditLogLevel: awstypes.WindowsAccessAuditLogLevel(fileAccessAuditLogLevel), + FileShareAccessAuditLogLevel: awstypes.WindowsAccessAuditLogLevel(fileShareAccessAuditLogLevel), } // audit_log_destination cannot be included in the request if the log levels are disabled - if fileAccessAuditLogLevel == fsx.WindowsAccessAuditLogLevelDisabled && fileShareAccessAuditLogLevel == fsx.WindowsAccessAuditLogLevelDisabled { + if fileAccessAuditLogLevel == string(awstypes.WindowsAccessAuditLogLevelDisabled) && fileShareAccessAuditLogLevel == string(awstypes.WindowsAccessAuditLogLevelDisabled) { return req } @@ -751,43 +753,43 @@ func expandWindowsAuditLogCreateConfiguration(l []interface{}) *fsx.WindowsAudit return req } -func flattenWindowsAuditLogConfiguration(adopts *fsx.WindowsAuditLogConfiguration) []map[string]interface{} { +func flattenWindowsAuditLogConfiguration(adopts *awstypes.WindowsAuditLogConfiguration) []map[string]interface{} { if adopts == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "file_access_audit_log_level": aws.StringValue(adopts.FileAccessAuditLogLevel), - "file_share_access_audit_log_level": aws.StringValue(adopts.FileShareAccessAuditLogLevel), + "file_access_audit_log_level": string(adopts.FileAccessAuditLogLevel), + "file_share_access_audit_log_level": string(adopts.FileShareAccessAuditLogLevel), } if adopts.AuditLogDestination != nil { - m["audit_log_destination"] = aws.StringValue(adopts.AuditLogDestination) + m["audit_log_destination"] = aws.ToString(adopts.AuditLogDestination) } return []map[string]interface{}{m} } -func expandWindowsDiskIopsConfiguration(l []interface{}) *fsx.DiskIopsConfiguration { +func expandWindowsDiskIopsConfiguration(l []interface{}) *awstypes.DiskIopsConfiguration { if len(l) == 0 || l[0] == nil { return nil } data := l[0].(map[string]interface{}) - req := &fsx.DiskIopsConfiguration{} + req := &awstypes.DiskIopsConfiguration{} if v, ok := data[names.AttrIOPS].(int); ok { req.Iops = aws.Int64(int64(v)) } if v, ok := data[names.AttrMode].(string); ok && v != "" { - req.Mode = aws.String(v) + req.Mode = awstypes.DiskIopsConfigurationMode(v) } return req } -func flattenWindowsDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { +func flattenWindowsDiskIopsConfiguration(rs *awstypes.DiskIopsConfiguration) []interface{} { if rs == nil { return []interface{}{} } @@ -795,11 +797,9 @@ func flattenWindowsDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interf m := map[string]interface{}{} if rs.Iops != nil { - m[names.AttrIOPS] = aws.Int64Value(rs.Iops) - } - if rs.Mode != nil { - m[names.AttrMode] = aws.StringValue(rs.Mode) + m[names.AttrIOPS] = aws.ToInt64(rs.Iops) } + m[names.AttrMode] = string(rs.Mode) return []interface{}{m} } @@ -819,8 +819,8 @@ func windowsAuditLogStateFunc(v interface{}) string { return value } -func findWindowsFileSystemByID(ctx context.Context, conn *fsx.FSx, id string) (*fsx.FileSystem, error) { - output, err := findFileSystemByIDAndType(ctx, conn, id, fsx.FileSystemTypeWindows) +func findWindowsFileSystemByID(ctx context.Context, conn *fsx.Client, id string) (*awstypes.FileSystem, error) { + 
output, err := findFileSystemByIDAndType(ctx, conn, id, awstypes.FileSystemTypeWindows) if err != nil { return nil, err diff --git a/internal/service/fsx/windows_file_system_data_source.go b/internal/service/fsx/windows_file_system_data_source.go index 15f7db17e19c..8bbf39aab396 100644 --- a/internal/service/fsx/windows_file_system_data_source.go +++ b/internal/service/fsx/windows_file_system_data_source.go @@ -6,7 +6,7 @@ package fsx import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -168,7 +168,7 @@ func dataSourceWindowsFileSystem() *schema.Resource { func dataSourceWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FSxConn(ctx) + conn := meta.(*conns.AWSClient).FSxClient(ctx) id := d.Get(names.AttrID).(string) filesystem, err := findWindowsFileSystemByID(ctx, conn, id) @@ -179,9 +179,9 @@ func dataSourceWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData windowsConfig := filesystem.WindowsConfiguration - d.SetId(aws.StringValue(filesystem.FileSystemId)) + d.SetId(aws.ToString(filesystem.FileSystemId)) d.Set("active_directory_id", windowsConfig.ActiveDirectoryId) - d.Set("aliases", aws.StringValueSlice(expandAliasValues(windowsConfig.Aliases))) + d.Set("aliases", expandAliasValues(windowsConfig.Aliases)) d.Set(names.AttrARN, filesystem.ResourceARN) if err := d.Set("audit_log_configuration", flattenWindowsAuditLogConfiguration(windowsConfig.AuditLogConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting audit_log_configuration: %s", err) @@ -196,13 +196,13 @@ func dataSourceWindowsFileSystemRead(ctx context.Context, d *schema.ResourceData d.Set(names.AttrDNSName, filesystem.DNSName) d.Set(names.AttrID, filesystem.FileSystemId) d.Set(names.AttrKMSKeyID, filesystem.KmsKeyId) - d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)) + d.Set("network_interface_ids", filesystem.NetworkInterfaceIds) d.Set(names.AttrOwnerID, filesystem.OwnerId) d.Set("preferred_file_server_ip", windowsConfig.PreferredFileServerIp) d.Set("preferred_subnet_id", windowsConfig.PreferredSubnetId) d.Set("storage_capacity", filesystem.StorageCapacity) d.Set(names.AttrStorageType, filesystem.StorageType) - d.Set(names.AttrSubnetIDs, aws.StringValueSlice(filesystem.SubnetIds)) + d.Set(names.AttrSubnetIDs, filesystem.SubnetIds) d.Set("throughput_capacity", windowsConfig.ThroughputCapacity) d.Set(names.AttrVPCID, filesystem.VpcId) d.Set("weekly_maintenance_start_time", windowsConfig.WeeklyMaintenanceStartTime) diff --git a/internal/service/fsx/windows_file_system_test.go b/internal/service/fsx/windows_file_system_test.go index 489f687c18b7..83eb2daa61de 100644 --- a/internal/service/fsx/windows_file_system_test.go +++ b/internal/service/fsx/windows_file_system_test.go @@ -9,8 +9,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/fsx" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,13 +23,13 @@ import ( func 
diff --git a/internal/service/fsx/windows_file_system_test.go b/internal/service/fsx/windows_file_system_test.go
index 489f687c18b7..83eb2daa61de 100644
--- a/internal/service/fsx/windows_file_system_test.go
+++ b/internal/service/fsx/windows_file_system_test.go
@@ -9,8 +9,8 @@ import (
 	"testing"
 
 	"github.com/YakDriver/regexache"
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/service/fsx"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
 	sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/hashicorp/terraform-plugin-testing/terraform"
@@ -23,13 +23,13 @@ import (
 
 func TestAccFSxWindowsFileSystem_basic(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -82,13 +82,13 @@ func TestAccFSxWindowsFileSystem_basic(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_disappears(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -107,13 +107,13 @@ func TestAccFSxWindowsFileSystem_disappears(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_singleAz2(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -159,13 +159,13 @@ func TestAccFSxWindowsFileSystem_singleAz2(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_storageTypeHdd(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -194,13 +194,13 @@ func TestAccFSxWindowsFileSystem_storageTypeHdd(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_multiAz(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -245,13 +245,13 @@ func TestAccFSxWindowsFileSystem_multiAz(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_aliases(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2, filesystem3 fsx.FileSystem
+	var filesystem1, filesystem2, filesystem3 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -299,13 +299,13 @@ func TestAccFSxWindowsFileSystem_aliases(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_automaticBackupRetentionDays(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2, filesystem3 fsx.FileSystem
+	var filesystem1, filesystem2, filesystem3 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -349,13 +349,13 @@ func TestAccFSxWindowsFileSystem_automaticBackupRetentionDays(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_copyTagsToBackups(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -391,13 +391,13 @@ func TestAccFSxWindowsFileSystem_copyTagsToBackups(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -433,7 +433,7 @@ func TestAccFSxWindowsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_deleteConfig(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
@@ -441,7 +441,7 @@ func TestAccFSxWindowsFileSystem_deleteConfig(t *testing.T) {
 	acctest.SkipIfEnvVarNotSet(t, "AWS_FSX_CREATE_FINAL_BACKUP")
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -472,7 +472,7 @@ func TestAccFSxWindowsFileSystem_deleteConfig(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_kmsKeyID(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	kmsKeyResourceName1 := "aws_kms_key.test1"
 	kmsKeyResourceName2 := "aws_kms_key.test2"
 	resourceName := "aws_fsx_windows_file_system.test"
@@ -480,7 +480,7 @@ func TestAccFSxWindowsFileSystem_kmsKeyID(t *testing.T) {
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -516,13 +516,13 @@ func TestAccFSxWindowsFileSystem_kmsKeyID(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_securityGroupIDs(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -558,13 +558,13 @@ func TestAccFSxWindowsFileSystem_securityGroupIDs(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_selfManagedActiveDirectory(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -593,13 +593,13 @@ func TestAccFSxWindowsFileSystem_selfManagedActiveDirectory(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_storageCapacity(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -637,13 +637,13 @@ func TestAccFSxWindowsFileSystem_storageCapacity(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_fromBackup(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -672,13 +672,13 @@ func TestAccFSxWindowsFileSystem_fromBackup(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_tags(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2, filesystem3 fsx.FileSystem
+	var filesystem1, filesystem2, filesystem3 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -726,13 +726,13 @@ func TestAccFSxWindowsFileSystem_tags(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_throughputCapacity(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -768,13 +768,13 @@ func TestAccFSxWindowsFileSystem_throughputCapacity(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_weeklyMaintenanceStartTime(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -810,24 +810,24 @@ func TestAccFSxWindowsFileSystem_weeklyMaintenanceStartTime(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_audit(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem fsx.FileSystem
+	var filesystem awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
 		Steps: []resource.TestStep{
 			{
-				Config: testAccWindowsFileSystemConfig_audit(rName, domainName, fsx.WindowsAccessAuditLogLevelSuccessOnly),
+				Config: testAccWindowsFileSystemConfig_audit(rName, domainName, string(awstypes.WindowsAccessAuditLogLevelSuccessOnly)),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckWindowsFileSystemExists(ctx, resourceName, &filesystem),
 					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.#", acctest.Ct1),
-					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_access_audit_log_level", fsx.WindowsAccessAuditLogLevelSuccessOnly),
-					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_share_access_audit_log_level", fsx.WindowsAccessAuditLogLevelSuccessOnly),
+					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_access_audit_log_level", string(awstypes.WindowsAccessAuditLogLevelSuccessOnly)),
+					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_share_access_audit_log_level", string(awstypes.WindowsAccessAuditLogLevelSuccessOnly)),
 					resource.TestCheckResourceAttrSet(resourceName, "audit_log_configuration.0.audit_log_destination"),
 				),
 			},
@@ -842,22 +842,22 @@ func TestAccFSxWindowsFileSystem_audit(t *testing.T) {
 				},
 			},
 			{
-				Config: testAccWindowsFileSystemConfig_audit(rName, domainName, fsx.WindowsAccessAuditLogLevelSuccessAndFailure),
+				Config: testAccWindowsFileSystemConfig_audit(rName, domainName, string(awstypes.WindowsAccessAuditLogLevelSuccessAndFailure)),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckWindowsFileSystemExists(ctx, resourceName, &filesystem),
 					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.#", acctest.Ct1),
-					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_access_audit_log_level", fsx.WindowsAccessAuditLogLevelSuccessAndFailure),
-					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_share_access_audit_log_level", fsx.WindowsAccessAuditLogLevelSuccessAndFailure),
+					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_access_audit_log_level", string(awstypes.WindowsAccessAuditLogLevelSuccessAndFailure)),
+					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_share_access_audit_log_level", string(awstypes.WindowsAccessAuditLogLevelSuccessAndFailure)),
 					resource.TestCheckResourceAttrSet(resourceName, "audit_log_configuration.0.audit_log_destination"),
 				),
 			},
 			{
-				Config: testAccWindowsFileSystemConfig_auditNoDestination(rName, domainName, fsx.WindowsAccessAuditLogLevelDisabled),
+				Config: testAccWindowsFileSystemConfig_auditNoDestination(rName, domainName, string(awstypes.WindowsAccessAuditLogLevelDisabled)),
 				Check: resource.ComposeTestCheckFunc(
 					testAccCheckWindowsFileSystemExists(ctx, resourceName, &filesystem),
 					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.#", acctest.Ct1),
-					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_access_audit_log_level", fsx.WindowsAccessAuditLogLevelDisabled),
-					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_share_access_audit_log_level", fsx.WindowsAccessAuditLogLevelDisabled),
+					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_access_audit_log_level", string(awstypes.WindowsAccessAuditLogLevelDisabled)),
+					resource.TestCheckResourceAttr(resourceName, "audit_log_configuration.0.file_share_access_audit_log_level", string(awstypes.WindowsAccessAuditLogLevelDisabled)),
 				),
 			},
 		},
@@ -866,13 +866,13 @@ func TestAccFSxWindowsFileSystem_audit(t *testing.T) {
 
 func TestAccFSxWindowsFileSystem_diskIops(t *testing.T) {
 	ctx := acctest.Context(t)
-	var filesystem1, filesystem2 fsx.FileSystem
+	var filesystem1, filesystem2 awstypes.FileSystem
 	resourceName := "aws_fsx_windows_file_system.test"
 	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
 	domainName := acctest.RandomDomainName()
 
 	resource.ParallelTest(t, resource.TestCase{
-		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) },
+		PreCheck:                 func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.FSxEndpointID) },
 		ErrorCheck:               acctest.ErrorCheck(t, names.FSxServiceID),
 		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
 		CheckDestroy:             testAccCheckWindowsFileSystemDestroy(ctx),
@@ -910,14 +910,14 @@ func TestAccFSxWindowsFileSystem_diskIops(t *testing.T) {
 	})
 }
 
-func testAccCheckWindowsFileSystemExists(ctx context.Context, n string, v *fsx.FileSystem) resource.TestCheckFunc {
+func testAccCheckWindowsFileSystemExists(ctx context.Context, n string, v *awstypes.FileSystem) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
 		rs, ok := s.RootModule().Resources[n]
 		if !ok {
 			return fmt.Errorf("Not found: %s", n)
 		}
 
-		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)
 
 		output, err := tffsx.FindWindowsFileSystemByID(ctx, conn, rs.Primary.ID)
 
@@ -933,7 +933,7 @@ func testAccCheckWindowsFileSystemExists(ctx context.Context, n string, v *fsx.F
 
 func testAccCheckWindowsFileSystemDestroy(ctx context.Context) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn(ctx)
+		conn := acctest.Provider.Meta().(*conns.AWSClient).FSxClient(ctx)
 
 		for _, rs := range s.RootModule().Resources {
 			if rs.Type != "aws_fsx_windows_file_system" {
@@ -957,20 +957,20 @@ func testAccCheckWindowsFileSystemDestroy(ctx context.Context) resource.TestChec
 	}
 }
 
-func testAccCheckWindowsFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc {
+func testAccCheckWindowsFileSystemNotRecreated(i, j *awstypes.FileSystem) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) {
-			return fmt.Errorf("FSx for Windows File Server File System (%s) recreated", aws.StringValue(i.FileSystemId))
+		if aws.ToString(i.FileSystemId) != aws.ToString(j.FileSystemId) {
+			return fmt.Errorf("FSx for Windows File Server File System (%s) recreated", aws.ToString(i.FileSystemId))
 		}
 
 		return nil
 	}
 }
 
-func testAccCheckWindowsFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc {
+func testAccCheckWindowsFileSystemRecreated(i, j *awstypes.FileSystem) resource.TestCheckFunc {
 	return func(s *terraform.State) error {
-		if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) {
-			return fmt.Errorf("FSx for Windows File Server File System (%s) not recreated", aws.StringValue(i.FileSystemId))
+		if aws.ToString(i.FileSystemId) == aws.ToString(j.FileSystemId) {
+			return fmt.Errorf("FSx for Windows File Server File System (%s) not recreated", aws.ToString(i.FileSystemId))
 		}
 
 		return nil
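The test checks above lean on `findFileSystemByIDAndType`, which the resource's `findWindowsFileSystemByID` wraps but which is defined elsewhere in the package. A hedged sketch of what a v2-style finder with that contract might look like, using the `DescribeFileSystems` API; this approximates the helper's behavior (the real provider helper also translates "not found" API errors into a retryable not-found error):

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/fsx"
	awstypes "github.com/aws/aws-sdk-go-v2/service/fsx/types"
)

// findFileSystemByIDAndTypeSketch describes one file system and requires
// that it be of the expected type -- an approximation, not provider code.
func findFileSystemByIDAndTypeSketch(ctx context.Context, conn *fsx.Client, id string, fsType awstypes.FileSystemType) (*awstypes.FileSystem, error) {
	out, err := conn.DescribeFileSystems(ctx, &fsx.DescribeFileSystemsInput{
		FileSystemIds: []string{id}, // v2 takes []string, not []*string
	})
	if err != nil {
		return nil, err
	}
	for i := range out.FileSystems {
		if out.FileSystems[i].FileSystemType == fsType { // typed enum, compared directly
			return &out.FileSystems[i], nil
		}
	}
	return nil, fmt.Errorf("FSx file system %s with type %s not found", id, fsType)
}

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		panic(err)
	}
	conn := fsx.NewFromConfig(cfg)

	fs, err := findFileSystemByIDAndTypeSketch(context.Background(), conn, "fs-0123456789abcdef0", awstypes.FileSystemTypeWindows)
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.ToString(fs.FileSystemId))
}
```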
diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl
index 09246eaf8138..9a5afdce41d5 100644
--- a/names/data/names_data.hcl
+++ b/names/data/names_data.hcl
@@ -4117,7 +4117,7 @@ service "fsx" {
 
   sdk {
     id             = "FSx"
-    client_version = [1]
+    client_version = [2]
   }
 
   names {
diff --git a/names/names.go b/names/names.go
index a3d45fd11c66..4a297acc9362 100644
--- a/names/names.go
+++ b/names/names.go
@@ -79,6 +79,7 @@ const (
 	EventsEndpointID    = "events"
 	EvidentlyEndpointID = "evidently"
 	FMSEndpointID       = "fms"
+	FSxEndpointID       = "fsx"
 	GrafanaEndpointID   = "grafana"
 	IVSEndpointID       = "ivs"
 	IVSChatEndpointID   = "ivschat"
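The `names.go` addition exists because the v1 SDK exported an `EndpointsID` constant per service package, and with the `fsx` v1 import removed the acceptance tests would no longer compile; the provider now carries the endpoint identifier itself. A toy illustration of the lookup the `PreCheckPartitionHasService` call relies on (the map and helper here are hypothetical stand-ins for the provider's real partition metadata, not its implementation):

```go
package main

import "fmt"

// Stand-ins mirroring the constants added to names/names.go above.
const (
	FMSEndpointID = "fms"
	FSxEndpointID = "fsx"
)

// partitionHasService reports whether a partition's service table
// contains the given endpoint identifier.
func partitionHasService(partitionServices map[string]bool, endpointID string) bool {
	return partitionServices[endpointID]
}

func main() {
	// Hypothetical service table for the standard AWS partition.
	awsPartition := map[string]bool{FMSEndpointID: true, FSxEndpointID: true}
	fmt.Println(partitionHasService(awsPartition, FSxEndpointID)) // true
}
```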