From 976f754602c3b8360d498dbeb342fa5f0f1126b3 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 19 Aug 2021 00:44:37 +0300
Subject: [PATCH 1/4] add support for creating filesystem from backup

---
 aws/resource_aws_fsx_lustre_file_system.go    | 50 +++++++++++++++---
 ...esource_aws_fsx_lustre_file_system_test.go | 52 +++++++++++++++++++
 2 files changed, 96 insertions(+), 6 deletions(-)

diff --git a/aws/resource_aws_fsx_lustre_file_system.go b/aws/resource_aws_fsx_lustre_file_system.go
index d5460f88ac3..54909f3f7d8 100644
--- a/aws/resource_aws_fsx_lustre_file_system.go
+++ b/aws/resource_aws_fsx_lustre_file_system.go
@@ -36,6 +36,11 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
                 Type:     schema.TypeString,
                 Computed: true,
             },
+            "backup_id": {
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
             "dns_name": {
                 Type:     schema.TypeString,
                 Computed: true,
@@ -90,7 +95,7 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
             },
             "storage_capacity": {
                 Type:         schema.TypeInt,
-                Required:     true,
+                Optional:     true,
                 ValidateFunc: validation.IntAtLeast(1200),
             },
             "subnet_ids": {
@@ -227,69 +232,102 @@ func resourceAwsFsxLustreFileSystemCreate(d *schema.ResourceData, meta interface
         },
     }
 
+    backupInput := &fsx.CreateFileSystemFromBackupInput{
+        ClientRequestToken: aws.String(resource.UniqueId()),
+        StorageType:        aws.String(d.Get("storage_type").(string)),
+        SubnetIds:          expandStringList(d.Get("subnet_ids").([]interface{})),
+        LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{
+            DeploymentType: aws.String(d.Get("deployment_type").(string)),
+        },
+    }
+
     //Applicable only for TypePersistent1
     if v, ok := d.GetOk("kms_key_id"); ok {
         input.KmsKeyId = aws.String(v.(string))
+        backupInput.KmsKeyId = aws.String(v.(string))
     }
 
     if v, ok := d.GetOk("automatic_backup_retention_days"); ok {
         input.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int)))
+        backupInput.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int)))
     }
 
     if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok {
         input.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string))
+        backupInput.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string))
     }
 
     if v, ok := d.GetOk("export_path"); ok {
         input.LustreConfiguration.ExportPath = aws.String(v.(string))
+        backupInput.LustreConfiguration.ExportPath = aws.String(v.(string))
     }
 
     if v, ok := d.GetOk("import_path"); ok {
         input.LustreConfiguration.ImportPath = aws.String(v.(string))
+        backupInput.LustreConfiguration.ImportPath = aws.String(v.(string))
     }
 
     if v, ok := d.GetOk("imported_file_chunk_size"); ok {
         input.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int)))
+        backupInput.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int)))
     }
 
     if v, ok := d.GetOk("security_group_ids"); ok {
         input.SecurityGroupIds = expandStringSet(v.(*schema.Set))
+        backupInput.SecurityGroupIds = expandStringSet(v.(*schema.Set))
     }
 
     if len(tags) > 0 {
         input.Tags = tags.IgnoreAws().FsxTags()
+        backupInput.Tags = tags.IgnoreAws().FsxTags()
     }
 
     if v, ok := d.GetOk("weekly_maintenance_start_time"); ok {
         input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string))
+        backupInput.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string))
     }
 
     if v, ok := d.GetOk("per_unit_storage_throughput"); ok {
         input.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int)))
+        backupInput.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int)))
     }
 
d.GetOk("drive_cache_type"); ok { input.LustreConfiguration.DriveCacheType = aws.String(v.(string)) + backupInput.LustreConfiguration.DriveCacheType = aws.String(v.(string)) } if v, ok := d.GetOk("auto_import_policy"); ok { input.LustreConfiguration.AutoImportPolicy = aws.String(v.(string)) + backupInput.LustreConfiguration.AutoImportPolicy = aws.String(v.(string)) } if v, ok := d.GetOk("copy_tags_to_backups"); ok { input.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) + backupInput.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool)) } if v, ok := d.GetOk("data_compression_type"); ok { input.LustreConfiguration.DataCompressionType = aws.String(v.(string)) + backupInput.LustreConfiguration.DataCompressionType = aws.String(v.(string)) } - result, err := conn.CreateFileSystem(input) - if err != nil { - return fmt.Errorf("Error creating FSx Lustre filesystem: %w", err) - } + if v, ok := d.GetOk("backup_id"); ok { + backupInput.BackupId = aws.String(v.(string)) + result, err := conn.CreateFileSystemFromBackup(backupInput) + if err != nil { + return fmt.Errorf("Error creating FSx Lustre filesystem from backup: %w", err) + } - d.SetId(aws.StringValue(result.FileSystem.FileSystemId)) + d.SetId(aws.StringValue(result.FileSystem.FileSystemId)) + } else { + result, err := conn.CreateFileSystem(input) + if err != nil { + return fmt.Errorf("Error creating FSx Lustre filesystem: %w", err) + } + + d.SetId(aws.StringValue(result.FileSystem.FileSystemId)) + } log.Println("[DEBUG] Waiting for filesystem to become available") diff --git a/aws/resource_aws_fsx_lustre_file_system_test.go b/aws/resource_aws_fsx_lustre_file_system_test.go index 7e6d42c56a8..31c53c74065 100644 --- a/aws/resource_aws_fsx_lustre_file_system_test.go +++ b/aws/resource_aws_fsx_lustre_file_system_test.go @@ -600,6 +600,35 @@ func TestAccAWSFsxLustreFileSystem_DeploymentTypePersistent1(t *testing.T) { }) } +func TestAccAWSFsxLustreFileSystem_fromBackup(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(fsx.EndpointsID, t) }, + ErrorCheck: testAccErrorCheck(t, fsx.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemFromBackup(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "50"), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1), + resource.TestCheckResourceAttrPair(resourceName, "backup_id", "aws_fsx_backup.test", "id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids", "backup_id"}, + }, + }, + }) +} + func TestAccAWSFsxLustreFileSystem_KmsKeyId(t *testing.T) { var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_lustre_file_system.test" @@ -1117,6 +1146,29 @@ resource "aws_fsx_lustre_file_system" "test" { `, perUnitStorageThroughput)) } +func testAccAwsFsxLustreFileSystemFromBackup() string { + return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(` +resource "aws_fsx_lustre_file_system" "base" { + storage_capacity = 1200 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "PERSISTENT_1" + 
+  per_unit_storage_throughput = 50
+}
+
+resource "aws_fsx_backup" "test" {
+  file_system_id = aws_fsx_lustre_file_system.base.id
+}
+
+resource "aws_fsx_lustre_file_system" "test" {
+  storage_capacity            = 1200
+  subnet_ids                  = [aws_subnet.test1.id]
+  deployment_type             = "PERSISTENT_1"
+  per_unit_storage_throughput = 50
+  backup_id                   = aws_fsx_backup.test.id
+}
+`))
+}
+
 func testAccAwsFsxLustreFileSystemConfigKmsKeyId1() string {
     return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), `
 resource "aws_kms_key" "test1" {

From d007084cf7b93a3013ca6a974ef52dc57d5f7439 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 19 Aug 2021 00:49:28 +0300
Subject: [PATCH 2/4] docs

---
 website/docs/r/fsx_lustre_file_system.html.markdown | 1 +
 1 file changed, 1 insertion(+)

diff --git a/website/docs/r/fsx_lustre_file_system.html.markdown b/website/docs/r/fsx_lustre_file_system.html.markdown
index af4962dda2f..5fa2bdac67a 100644
--- a/website/docs/r/fsx_lustre_file_system.html.markdown
+++ b/website/docs/r/fsx_lustre_file_system.html.markdown
@@ -26,6 +26,7 @@ The following arguments are supported:
 
 * `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Minimum of `1200`. See more details at [Allowed values for Fsx storage capacity](https://docs.aws.amazon.com/fsx/latest/APIReference/API_CreateFileSystem.html#FSx-CreateFileSystem-request-StorageCapacity). Update is allowed only for `SCRATCH_2` and `PERSISTENT_1` deployment types, See more details at [Fsx Storage Capacity Update](https://docs.aws.amazon.com/fsx/latest/APIReference/API_UpdateFileSystem.html#FSx-UpdateFileSystem-request-StorageCapacity).
 * `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone.
+* `backup_id` - (Optional) The ID of the source backup to create the file system from.
 * `export_path` - (Optional) S3 URI (with optional prefix) where the root of your Amazon FSx file system is exported. Can only be specified with `import_path` argument and the path must use the same Amazon S3 bucket as specified in `import_path`. Set equal to `import_path` to overwrite files on export. Defaults to `s3://{IMPORT BUCKET}/FSxLustre{CREATION TIMESTAMP}`.
 * `import_path` - (Optional) S3 URI (with optional prefix) that you're using as the data repository for your FSx for Lustre file system. For example, `s3://example-bucket/optional-prefix/`.
 * `imported_file_chunk_size` - (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. Can only be specified with `import_path` argument. Defaults to `1024`. Minimum of `1` and maximum of `512000`.

From 064a4ff9dea55fc93a81031613268d9014afdf32 Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 19 Aug 2021 09:12:13 +0300
Subject: [PATCH 3/4] changelog

---
 .changelog/20614.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/20614.txt

diff --git a/.changelog/20614.txt b/.changelog/20614.txt
new file mode 100644
index 00000000000..dcb32a6a4b4
--- /dev/null
+++ b/.changelog/20614.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_fsx_lustre_file_system: Allow creating a file system from a backup using the new `backup_id` argument.
+```
\ No newline at end of file

From 09821db98154e594dfc65799bacaad702868dbaf Mon Sep 17 00:00:00 2001
From: drfaust92
Date: Thu, 19 Aug 2021 09:12:18 +0300
Subject: [PATCH 4/4] fmt

---
 aws/resource_aws_fsx_lustre_file_system_test.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/aws/resource_aws_fsx_lustre_file_system_test.go b/aws/resource_aws_fsx_lustre_file_system_test.go
index 31c53c74065..b2b0c17bb25 100644
--- a/aws/resource_aws_fsx_lustre_file_system_test.go
+++ b/aws/resource_aws_fsx_lustre_file_system_test.go
@@ -1147,7 +1147,7 @@ resource "aws_fsx_lustre_file_system" "test" {
 }
 
 func testAccAwsFsxLustreFileSystemFromBackup() string {
-    return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), fmt.Sprintf(`
+    return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), `
 resource "aws_fsx_lustre_file_system" "base" {
   storage_capacity            = 1200
   subnet_ids                  = [aws_subnet.test1.id]
   deployment_type             = "PERSISTENT_1"
   per_unit_storage_throughput = 50
 }
 
 resource "aws_fsx_backup" "test" {
   file_system_id = aws_fsx_lustre_file_system.base.id
 }
 
 resource "aws_fsx_lustre_file_system" "test" {
   storage_capacity            = 1200
   subnet_ids                  = [aws_subnet.test1.id]
   deployment_type             = "PERSISTENT_1"
   per_unit_storage_throughput = 50
   backup_id                   = aws_fsx_backup.test.id
 }
-`))
+`)
 }
 
 func testAccAwsFsxLustreFileSystemConfigKmsKeyId1() string {
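
For reference, the acceptance-test configuration above reduces to the following usage pattern for the new argument. This is a minimal sketch only: the resource names and the `aws_subnet.example` reference are illustrative placeholders, and the capacity/throughput values simply mirror the ones used in the test; the only behaviour introduced by this patch series is `backup_id` itself.

```hcl
# Back up an existing PERSISTENT_1 FSx for Lustre file system
# (source file system and subnet are illustrative placeholders).
resource "aws_fsx_lustre_file_system" "source" {
  storage_capacity            = 1200
  subnet_ids                  = [aws_subnet.example.id]
  deployment_type             = "PERSISTENT_1"
  per_unit_storage_throughput = 50
}

resource "aws_fsx_backup" "example" {
  file_system_id = aws_fsx_lustre_file_system.source.id
}

# Restore a new file system from that backup via the new backup_id argument.
resource "aws_fsx_lustre_file_system" "restored" {
  storage_capacity            = 1200
  subnet_ids                  = [aws_subnet.example.id]
  deployment_type             = "PERSISTENT_1"
  per_unit_storage_throughput = 50
  backup_id                   = aws_fsx_backup.example.id
}
```

Because the schema marks `backup_id` as `ForceNew`, changing it replaces the file system rather than updating it in place, which is why the import test ignores the attribute.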