diff --git a/aws/fsx.go b/aws/fsx.go new file mode 100644 index 00000000000..f8c5450b1c9 --- /dev/null +++ b/aws/fsx.go @@ -0,0 +1,77 @@ +package aws + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform/helper/resource" +) + +func describeFsxFileSystem(conn *fsx.FSx, id string) (*fsx.FileSystem, error) { + input := &fsx.DescribeFileSystemsInput{ + FileSystemIds: []*string{aws.String(id)}, + } + var filesystem *fsx.FileSystem + + err := conn.DescribeFileSystemsPages(input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { + for _, fs := range page.FileSystems { + if aws.StringValue(fs.FileSystemId) == id { + filesystem = fs + return false + } + } + + return !lastPage + }) + + return filesystem, err +} + +func refreshFsxFileSystemLifecycle(conn *fsx.FSx, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + filesystem, err := describeFsxFileSystem(conn, id) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + if filesystem == nil { + return nil, "", nil + } + + return filesystem, aws.StringValue(filesystem.Lifecycle), nil + } +} + +func waitForFsxFileSystemCreation(conn *fsx.FSx, id string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileSystemLifecycleCreating}, + Target: []string{fsx.FileSystemLifecycleAvailable}, + Refresh: refreshFsxFileSystemLifecycle(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} + +func waitForFsxFileSystemDeletion(conn *fsx.FSx, id string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.FileSystemLifecycleAvailable, fsx.FileSystemLifecycleDeleting}, + Target: []string{}, + Refresh: refreshFsxFileSystemLifecycle(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + _, err := stateConf.WaitForState() + + return err +} diff --git a/aws/provider.go b/aws/provider.go index 93966053b04..a3597a77c9a 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -496,6 +496,8 @@ func Provider() terraform.ResourceProvider { "aws_emr_instance_group": resourceAwsEMRInstanceGroup(), "aws_emr_security_configuration": resourceAwsEMRSecurityConfiguration(), "aws_flow_log": resourceAwsFlowLog(), + "aws_fsx_lustre_file_system": resourceAwsFsxLustreFileSystem(), + "aws_fsx_windows_file_system": resourceAwsFsxWindowsFileSystem(), "aws_fms_admin_account": resourceAwsFmsAdminAccount(), "aws_gamelift_alias": resourceAwsGameliftAlias(), "aws_gamelift_build": resourceAwsGameliftBuild(), diff --git a/aws/resource_aws_directory_service_directory_test.go b/aws/resource_aws_directory_service_directory_test.go index ffbc21cb002..8cbd5b5118b 100644 --- a/aws/resource_aws_directory_service_directory_test.go +++ b/aws/resource_aws_directory_service_directory_test.go @@ -16,8 +16,9 @@ import ( func init() { resource.AddTestSweepers("aws_directory_service_directory", &resource.Sweeper{ - Name: "aws_directory_service_directory", - F: testSweepDirectoryServiceDirectories, + Name: "aws_directory_service_directory", + F: testSweepDirectoryServiceDirectories, + Dependencies: []string{"aws_fsx_windows_file_system"}, }) } diff --git a/aws/resource_aws_fsx_lustre_file_system.go b/aws/resource_aws_fsx_lustre_file_system.go new file mode 100644 index 00000000000..f9ac456c0f1 --- /dev/null +++ 
b/aws/resource_aws_fsx_lustre_file_system.go @@ -0,0 +1,296 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" +) + +func resourceAwsFsxLustreFileSystem() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsFsxLustreFileSystemCreate, + Read: resourceAwsFsxLustreFileSystemRead, + Update: resourceAwsFsxLustreFileSystemUpdate, + Delete: resourceAwsFsxLustreFileSystemDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "export_path": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 900), + validation.StringMatch(regexp.MustCompile(`^s3://`), "must begin with s3://"), + ), + }, + "import_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 900), + validation.StringMatch(regexp.MustCompile(`^s3://`), "must begin with s3://"), + ), + }, + "imported_file_chunk_size": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1, 512000), + }, + "network_interface_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 50, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "storage_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(3600), + }, + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": tagsSchema(), + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "weekly_maintenance_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(7, 7), + validation.StringMatch(regexp.MustCompile(`^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format d:HH:MM"), + ), + }, + }, + } +} + +func resourceAwsFsxLustreFileSystemCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + input := &fsx.CreateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemType: aws.String(fsx.FileSystemTypeLustre), + StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), + SubnetIds: expandStringSet(d.Get("subnet_ids").(*schema.Set)), + } + + if v, ok := d.GetOk("export_path"); ok { + if input.LustreConfiguration == nil { + input.LustreConfiguration = &fsx.CreateFileSystemLustreConfiguration{} + } + + input.LustreConfiguration.ExportPath = aws.String(v.(string)) + } + + if v, ok := d.GetOk("import_path"); ok { + if input.LustreConfiguration == nil { + input.LustreConfiguration = &fsx.CreateFileSystemLustreConfiguration{} + } 
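+		// Descriptive note: LustreConfiguration is built lazily in each d.GetOk block,
+		// so the create request only includes it when a Lustre-specific argument
+		// (export_path, import_path, imported_file_chunk_size, ...) is actually set.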
+ + input.LustreConfiguration.ImportPath = aws.String(v.(string)) + } + + if v, ok := d.GetOk("imported_file_chunk_size"); ok { + if input.LustreConfiguration == nil { + input.LustreConfiguration = &fsx.CreateFileSystemLustreConfiguration{} + } + + input.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("security_group_ids"); ok { + input.SecurityGroupIds = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = tagsFromMapFSX(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { + if input.LustreConfiguration == nil { + input.LustreConfiguration = &fsx.CreateFileSystemLustreConfiguration{} + } + + input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) + } + + result, err := conn.CreateFileSystem(input) + if err != nil { + return fmt.Errorf("Error creating FSx filesystem: %s", err) + } + + d.SetId(*result.FileSystem.FileSystemId) + + log.Println("[DEBUG] Waiting for filesystem to become available") + + if err := waitForFsxFileSystemCreation(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("Error waiting for filesystem (%s) to become available: %s", d.Id(), err) + } + + return resourceAwsFsxLustreFileSystemRead(d, meta) +} + +func resourceAwsFsxLustreFileSystemUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + if d.HasChange("tags") { + if err := setTagsFSX(conn, d); err != nil { + return fmt.Errorf("Error updating tags for FSx filesystem: %s", err) + } + } + + requestUpdate := false + input := &fsx.UpdateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemId: aws.String(d.Id()), + LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{}, + } + + if d.HasChange("weekly_maintenance_start_time") { + input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string)) + requestUpdate = true + } + + if requestUpdate { + _, err := conn.UpdateFileSystem(input) + if err != nil { + return fmt.Errorf("error updating FSX File System (%s): %s", d.Id(), err) + } + } + + return resourceAwsFsxLustreFileSystemRead(d, meta) +} + +func resourceAwsFsxLustreFileSystemRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + filesystem, err := describeFsxFileSystem(conn, d.Id()) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("Error reading FSx File System (%s): %s", d.Id(), err) + } + + if filesystem == nil { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if filesystem.WindowsConfiguration != nil { + return fmt.Errorf("expected FSx Lustre File System, found FSx Windows File System: %s", d.Id()) + } + + if filesystem.LustreConfiguration == nil { + return fmt.Errorf("error describing FSx Lustre File System (%s): empty Lustre configuration", d.Id()) + } + + if filesystem.LustreConfiguration.DataRepositoryConfiguration == nil { + // Initialize an empty structure to simplify d.Set() handling + filesystem.LustreConfiguration.DataRepositoryConfiguration = &fsx.DataRepositoryConfiguration{} + } + + d.Set("arn", filesystem.ResourceARN) + d.Set("dns_name", filesystem.DNSName) + d.Set("export_path", 
filesystem.LustreConfiguration.DataRepositoryConfiguration.ExportPath) + d.Set("import_path", filesystem.LustreConfiguration.DataRepositoryConfiguration.ImportPath) + d.Set("imported_file_chunk_size", filesystem.LustreConfiguration.DataRepositoryConfiguration.ImportedFileChunkSize) + + if err := d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)); err != nil { + return fmt.Errorf("error setting network_interface_ids: %s", err) + } + + d.Set("owner_id", filesystem.OwnerId) + d.Set("storage_capacity", filesystem.StorageCapacity) + + if err := d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)); err != nil { + return fmt.Errorf("error setting subnet_ids: %s", err) + } + + if err := d.Set("tags", tagsToMapFSX(filesystem.Tags)); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + + d.Set("vpc_id", filesystem.VpcId) + d.Set("weekly_maintenance_start_time", filesystem.LustreConfiguration.WeeklyMaintenanceStartTime) + + return nil +} + +func resourceAwsFsxLustreFileSystemDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + request := &fsx.DeleteFileSystemInput{ + FileSystemId: aws.String(d.Id()), + } + + _, err := conn.DeleteFileSystem(request) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + return nil + } + + if err != nil { + return fmt.Errorf("Error deleting FSx filesystem: %s", err) + } + + log.Println("[DEBUG] Waiting for filesystem to delete") + + if err := waitForFsxFileSystemDeletion(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("Error waiting for filesystem (%s) to delete: %s", d.Id(), err) + } + + return nil +} diff --git a/aws/resource_aws_fsx_lustre_file_system_test.go b/aws/resource_aws_fsx_lustre_file_system_test.go new file mode 100644 index 00000000000..8e0c3179a41 --- /dev/null +++ b/aws/resource_aws_fsx_lustre_file_system_test.go @@ -0,0 +1,666 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func init() { + resource.AddTestSweepers("aws_fsx_lustre_file_system", &resource.Sweeper{ + Name: "aws_fsx_lustre_file_system", + F: testSweepFSXLustreFileSystems, + }) +} + +func testSweepFSXLustreFileSystems(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).fsxconn + input := &fsx.DescribeFileSystemsInput{} + + err = conn.DescribeFileSystemsPages(input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { + for _, fs := range page.FileSystems { + if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeLustre { + continue + } + + input := &fsx.DeleteFileSystemInput{ + FileSystemId: fs.FileSystemId, + } + + log.Printf("[INFO] Deleting FSx lustre filesystem: %s", aws.StringValue(fs.FileSystemId)) + _, err := conn.DeleteFileSystem(input) + + if err != nil { + log.Printf("[ERROR] Error deleting FSx filesystem: %s", err) + continue + } + + if err := waitForFsxFileSystemDeletion(conn, aws.StringValue(fs.FileSystemId), 30*time.Minute); err != nil { + log.Printf("[ERROR] Error waiting for filesystem (%s) to delete: %s", aws.StringValue(fs.FileSystemId), err) + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping 
FSx Lustre Filesystem sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing FSx Lustre Filesystems: %s", err) + } + + return nil + +} + +func TestAccAWSFsxLustreFileSystem_basic(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigSubnetIds1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`file-system/fs-.+`)), + resource.TestMatchResourceAttr(resourceName, "dns_name", regexp.MustCompile(`fs-.+\.fsx\.`)), + resource.TestCheckResourceAttr(resourceName, "export_path", ""), + resource.TestCheckResourceAttr(resourceName, "import_path", ""), + resource.TestCheckResourceAttr(resourceName, "imported_file_chunk_size", "0"), + resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "2"), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "3600"), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestMatchResourceAttr(resourceName, "vpc_id", regexp.MustCompile(`^vpc-.+`)), + resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexp.MustCompile(`^\d:\d\d:\d\d$`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + }, + }) +} + +func TestAccAWSFsxLustreFileSystem_disappears(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigSubnetIds1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem), + testAccCheckFsxLustreFileSystemDisappears(&filesystem), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSFsxLustreFileSystem_ExportPath(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigExportPath(rName, ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "export_path", fmt.Sprintf("s3://%s", rName)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccAwsFsxLustreFileSystemConfigExportPath(rName, "/prefix/"), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxLustreFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "export_path", fmt.Sprintf("s3://%s/prefix/", rName)), + ), + }, + }, + }) +} + +func TestAccAWSFsxLustreFileSystem_ImportPath(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigImportPath(rName, ""), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "import_path", fmt.Sprintf("s3://%s", rName)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccAwsFsxLustreFileSystemConfigImportPath(rName, "/prefix/"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxLustreFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "import_path", fmt.Sprintf("s3://%s/prefix/", rName)), + ), + }, + }, + }) +} + +func TestAccAWSFsxLustreFileSystem_ImportedFileChunkSize(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigImportedFileChunkSize(rName, 2048), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "imported_file_chunk_size", "2048"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccAwsFsxLustreFileSystemConfigImportedFileChunkSize(rName, 4096), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxLustreFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "imported_file_chunk_size", "4096"), + ), + }, + }, + }) +} + +func TestAccAWSFsxLustreFileSystem_SecurityGroupIds(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigSecurityGroupIds1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: 
testAccAwsFsxLustreFileSystemConfigSecurityGroupIds2(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxLustreFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "2"), + ), + }, + }, + }) +} + +func TestAccAWSFsxLustreFileSystem_StorageCapacity(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigStorageCapacity(7200), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "7200"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccAwsFsxLustreFileSystemConfigStorageCapacity(3600), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxLustreFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "3600"), + ), + }, + }, + }) +} + +func TestAccAWSFsxLustreFileSystem_Tags(t *testing.T) { + var filesystem1, filesystem2, filesystem3 fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigTags1("key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccAwsFsxLustreFileSystemConfigTags2("key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxLustreFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAwsFsxLustreFileSystemConfigTags1("key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem3), + testAccCheckFsxLustreFileSystemNotRecreated(&filesystem2, &filesystem3), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccAWSFsxLustreFileSystem_WeeklyMaintenanceStartTime(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_lustre_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + 
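+		// CheckDestroy verifies the FSx file system no longer exists once the test finishes.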
CheckDestroy: testAccCheckFsxLustreFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxLustreFileSystemConfigWeeklyMaintenanceStartTime("1:01:01"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time", "1:01:01"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccAwsFsxLustreFileSystemConfigWeeklyMaintenanceStartTime("2:02:02"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxLustreFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time", "2:02:02"), + ), + }, + }, + }) +} + +func testAccCheckFsxLustreFileSystemExists(resourceName string, fs *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).fsxconn + + filesystem, err := describeFsxFileSystem(conn, rs.Primary.ID) + + if err != nil { + return err + } + + if filesystem == nil { + return fmt.Errorf("FSx File System (%s) not found", rs.Primary.ID) + } + + *fs = *filesystem + + return nil + } +} + +func testAccCheckFsxLustreFileSystemDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).fsxconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fsx_lustre_file_system" { + continue + } + + filesystem, err := describeFsxFileSystem(conn, rs.Primary.ID) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + continue + } + + if err != nil { + return err + } + + if filesystem != nil { + return fmt.Errorf("FSx File System (%s) still exists", rs.Primary.ID) + } + } + return nil +} + +func testAccCheckFsxLustreFileSystemDisappears(filesystem *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).fsxconn + + input := &fsx.DeleteFileSystemInput{ + FileSystemId: filesystem.FileSystemId, + } + + _, err := conn.DeleteFileSystem(input) + + if err != nil { + return err + } + + return waitForFsxFileSystemDeletion(conn, aws.StringValue(filesystem.FileSystemId), 30*time.Minute) + } +} + +func testAccCheckFsxLustreFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { + return fmt.Errorf("FSx File System (%s) recreated", aws.StringValue(i.FileSystemId)) + } + + return nil + } +} + +func testAccCheckFsxLustreFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { + return fmt.Errorf("FSx File System (%s) not recreated", aws.StringValue(i.FileSystemId)) + } + + return nil + } +} + +func testAccAwsFsxLustreFileSystemConfigBase() string { + return fmt.Sprintf(` +data "aws_availability_zones" "available" { + state = "available" +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test1" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "10.0.1.0/24" + availability_zone = "${data.aws_availability_zones.available.names[0]}" +} +`) 
+} + +func testAccAwsFsxLustreFileSystemConfigExportPath(rName, exportPrefix string) string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + acl = "private" + bucket = %[1]q +} + +resource "aws_fsx_lustre_file_system" "test" { + export_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" + import_path = "s3://${aws_s3_bucket.test.bucket}" + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] +} +`, rName, exportPrefix) +} + +func testAccAwsFsxLustreFileSystemConfigImportPath(rName, importPrefix string) string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + acl = "private" + bucket = %[1]q +} + +resource "aws_fsx_lustre_file_system" "test" { + import_path = "s3://${aws_s3_bucket.test.bucket}%[2]s" + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] +} +`, rName, importPrefix) +} + +func testAccAwsFsxLustreFileSystemConfigImportedFileChunkSize(rName string, importedFileChunkSize int) string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + acl = "private" + bucket = %[1]q +} + +resource "aws_fsx_lustre_file_system" "test" { + import_path = "s3://${aws_s3_bucket.test.bucket}" + imported_file_chunk_size = %[2]d + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] +} +`, rName, importedFileChunkSize) +} + +func testAccAwsFsxLustreFileSystemConfigSecurityGroupIds1() string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_security_group" "test1" { + description = "security group for FSx testing" + vpc_id = "${aws_vpc.test.id}" + + ingress { + cidr_blocks = ["${aws_vpc.test.cidr_block}"] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } +} + +resource "aws_fsx_lustre_file_system" "test" { + security_group_ids = ["${aws_security_group.test1.id}"] + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] +} +`) +} + +func testAccAwsFsxLustreFileSystemConfigSecurityGroupIds2() string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_security_group" "test1" { + description = "security group for FSx testing" + vpc_id = "${aws_vpc.test.id}" + + ingress { + cidr_blocks = ["${aws_vpc.test.cidr_block}"] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } +} + +resource "aws_security_group" "test2" { + description = "security group for FSx testing" + vpc_id = "${aws_vpc.test.id}" + + ingress { + cidr_blocks = ["${aws_vpc.test.cidr_block}"] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } +} + +resource "aws_fsx_lustre_file_system" "test" { + security_group_ids = ["${aws_security_group.test1.id}", "${aws_security_group.test2.id}"] + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] +} +`) +} + +func testAccAwsFsxLustreFileSystemConfigStorageCapacity(storageCapacity int) string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_lustre_file_system" "test" { + storage_capacity = %[1]d + subnet_ids = ["${aws_subnet.test1.id}"] +} +`, storageCapacity) +} + +func testAccAwsFsxLustreFileSystemConfigSubnetIds1() string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource 
"aws_fsx_lustre_file_system" "test" { + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] +} +`) +} + +func testAccAwsFsxLustreFileSystemConfigTags1(tagKey1, tagValue1 string) string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_lustre_file_system" "test" { + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] + + tags = { + %[1]q = %[2]q + } +} +`, tagKey1, tagValue1) +} + +func testAccAwsFsxLustreFileSystemConfigTags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_lustre_file_system" "test" { + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] + + tags = { + %[1]q = %[2]q + %[3]q = %[4]q + } +} +`, tagKey1, tagValue1, tagKey2, tagValue2) +} + +func testAccAwsFsxLustreFileSystemConfigWeeklyMaintenanceStartTime(weeklyMaintenanceStartTime string) string { + return testAccAwsFsxLustreFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_lustre_file_system" "test" { + storage_capacity = 3600 + subnet_ids = ["${aws_subnet.test1.id}"] + weekly_maintenance_start_time = %[1]q +} +`, weeklyMaintenanceStartTime) +} diff --git a/aws/resource_aws_fsx_windows_file_system.go b/aws/resource_aws_fsx_windows_file_system.go new file mode 100644 index 00000000000..b29112a1d9f --- /dev/null +++ b/aws/resource_aws_fsx_windows_file_system.go @@ -0,0 +1,439 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" +) + +func resourceAwsFsxWindowsFileSystem() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsFsxWindowsFileSystemCreate, + Read: resourceAwsFsxWindowsFileSystemRead, + Update: resourceAwsFsxWindowsFileSystemUpdate, + Delete: resourceAwsFsxWindowsFileSystemDelete, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("skip_final_backup", false) + + return []*schema.ResourceData{d}, nil + }, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "active_directory_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"self_managed_active_directory"}, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "automatic_backup_retention_days": { + Type: schema.TypeInt, + Optional: true, + Default: 7, + ValidateFunc: validation.IntBetween(0, 35), + }, + "copy_tags_to_backups": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "daily_automatic_backup_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(5, 5), + validation.StringMatch(regexp.MustCompile(`^([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format HH:MM"), + ), + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateArn, + }, + "network_interface_ids": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "owner_id": { + Type: schema.TypeString, 
+ Computed: true, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 50, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "self_managed_active_directory": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"active_directory_id"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_ips": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 2, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "domain_name": { + Type: schema.TypeString, + Required: true, + }, + "file_system_administrators_group": { + Type: schema.TypeString, + Optional: true, + Default: "Domain Admins", + ValidateFunc: validation.StringLenBetween(1, 256), + }, + "organizational_unit_distinguished_name": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 2000), + }, + "password": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + "username": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 256), + }, + }, + }, + }, + "skip_final_backup": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "storage_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(300, 65536), + }, + "subnet_ids": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": tagsSchema(), + "throughput_capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(8, 2048), + }, + "vpc_id": { + Type: schema.TypeString, + Computed: true, + }, + "weekly_maintenance_start_time": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.All( + validation.StringLenBetween(7, 7), + validation.StringMatch(regexp.MustCompile(`^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format d:HH:MM"), + ), + }, + }, + } +} + +func resourceAwsFsxWindowsFileSystemCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + input := &fsx.CreateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemType: aws.String(fsx.FileSystemTypeWindows), + StorageCapacity: aws.Int64(int64(d.Get("storage_capacity").(int))), + SubnetIds: expandStringSet(d.Get("subnet_ids").(*schema.Set)), + WindowsConfiguration: &fsx.CreateFileSystemWindowsConfiguration{ + AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), + CopyTagsToBackups: aws.Bool(d.Get("copy_tags_to_backups").(bool)), + ThroughputCapacity: aws.Int64(int64(d.Get("throughput_capacity").(int))), + }, + } + + if v, ok := d.GetOk("active_directory_id"); ok { + input.WindowsConfiguration.ActiveDirectoryId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok { + input.WindowsConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) + } + + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("security_group_ids"); ok { + input.SecurityGroupIds = expandStringSet(v.(*schema.Set)) + } + + if v, ok := d.GetOk("self_managed_active_directory"); ok { + input.WindowsConfiguration.SelfManagedActiveDirectoryConfiguration = 
expandFsxSelfManagedActiveDirectoryConfigurationCreate(v.([]interface{})) + } + + if v, ok := d.GetOk("tags"); ok { + input.Tags = tagsFromMapFSX(v.(map[string]interface{})) + } + + if v, ok := d.GetOk("weekly_maintenance_start_time"); ok { + input.WindowsConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string)) + } + + result, err := conn.CreateFileSystem(input) + if err != nil { + return fmt.Errorf("Error creating FSx filesystem: %s", err) + } + + d.SetId(*result.FileSystem.FileSystemId) + + log.Println("[DEBUG] Waiting for filesystem to become available") + + if err := waitForFsxFileSystemCreation(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("Error waiting for filesystem (%s) to become available: %s", d.Id(), err) + } + + return resourceAwsFsxWindowsFileSystemRead(d, meta) +} + +func resourceAwsFsxWindowsFileSystemUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + if d.HasChange("tags") { + if err := setTagsFSX(conn, d); err != nil { + return fmt.Errorf("Error updating tags for FSx filesystem: %s", err) + } + } + + requestUpdate := false + input := &fsx.UpdateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemId: aws.String(d.Id()), + WindowsConfiguration: &fsx.UpdateFileSystemWindowsConfiguration{}, + } + + if d.HasChange("automatic_backup_retention_days") { + input.WindowsConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))) + requestUpdate = true + } + + if d.HasChange("daily_automatic_backup_start_time") { + input.WindowsConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string)) + requestUpdate = true + } + + if d.HasChange("self_managed_active_directory") { + input.WindowsConfiguration.SelfManagedActiveDirectoryConfiguration = expandFsxSelfManagedActiveDirectoryConfigurationUpdate(d.Get("self_managed_active_directory").([]interface{})) + requestUpdate = true + } + + if d.HasChange("weekly_maintenance_start_time") { + input.WindowsConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string)) + requestUpdate = true + } + + if requestUpdate { + _, err := conn.UpdateFileSystem(input) + if err != nil { + return fmt.Errorf("error updating FSX File System (%s): %s", d.Id(), err) + } + } + + return resourceAwsFsxWindowsFileSystemRead(d, meta) +} + +func resourceAwsFsxWindowsFileSystemRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).fsxconn + + filesystem, err := describeFsxFileSystem(conn, d.Id()) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("Error reading FSx File System (%s): %s", d.Id(), err) + } + + if filesystem == nil { + log.Printf("[WARN] FSx File System (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if filesystem.LustreConfiguration != nil { + return fmt.Errorf("expected FSx Windows File System, found FSx Lustre File System: %s", d.Id()) + } + + if filesystem.WindowsConfiguration == nil { + return fmt.Errorf("error describing FSx Windows File System (%s): empty Windows configuration", d.Id()) + } + + d.Set("active_directory_id", filesystem.WindowsConfiguration.ActiveDirectoryId) + d.Set("arn", filesystem.ResourceARN) + d.Set("automatic_backup_retention_days", 
filesystem.WindowsConfiguration.AutomaticBackupRetentionDays)
+	d.Set("copy_tags_to_backups", filesystem.WindowsConfiguration.CopyTagsToBackups)
+	d.Set("daily_automatic_backup_start_time", filesystem.WindowsConfiguration.DailyAutomaticBackupStartTime)
+	d.Set("dns_name", filesystem.DNSName)
+	d.Set("kms_key_id", filesystem.KmsKeyId)
+
+	if err := d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)); err != nil {
+		return fmt.Errorf("error setting network_interface_ids: %s", err)
+	}
+
+	d.Set("owner_id", filesystem.OwnerId)
+
+	if err := d.Set("self_managed_active_directory", flattenFsxSelfManagedActiveDirectoryConfiguration(d, filesystem.WindowsConfiguration.SelfManagedActiveDirectoryConfiguration)); err != nil {
+		return fmt.Errorf("error setting self_managed_active_directory: %s", err)
+	}
+
+	d.Set("storage_capacity", filesystem.StorageCapacity)
+
+	if err := d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)); err != nil {
+		return fmt.Errorf("error setting subnet_ids: %s", err)
+	}
+
+	if err := d.Set("tags", tagsToMapFSX(filesystem.Tags)); err != nil {
+		return fmt.Errorf("error setting tags: %s", err)
+	}
+
+	d.Set("throughput_capacity", filesystem.WindowsConfiguration.ThroughputCapacity)
+	d.Set("vpc_id", filesystem.VpcId)
+	d.Set("weekly_maintenance_start_time", filesystem.WindowsConfiguration.WeeklyMaintenanceStartTime)
+
+	return nil
+}
+
+func resourceAwsFsxWindowsFileSystemDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).fsxconn
+
+	input := &fsx.DeleteFileSystemInput{
+		ClientRequestToken: aws.String(resource.UniqueId()),
+		FileSystemId:       aws.String(d.Id()),
+		WindowsConfiguration: &fsx.DeleteFileSystemWindowsConfiguration{
+			SkipFinalBackup: aws.Bool(d.Get("skip_final_backup").(bool)),
+		},
+	}
+
+	_, err := conn.DeleteFileSystem(input)
+
+	if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") {
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("Error deleting FSx filesystem: %s", err)
+	}
+
+	log.Println("[DEBUG] Waiting for filesystem to delete")
+
+	if err := waitForFsxFileSystemDeletion(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil {
+		return fmt.Errorf("Error waiting for filesystem (%s) to delete: %s", d.Id(), err)
+	}
+
+	return nil
+}
+
+func expandFsxSelfManagedActiveDirectoryConfigurationCreate(l []interface{}) *fsx.SelfManagedActiveDirectoryConfiguration {
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+
+	data := l[0].(map[string]interface{})
+	req := &fsx.SelfManagedActiveDirectoryConfiguration{
+		DomainName: aws.String(data["domain_name"].(string)),
+		DnsIps:     expandStringSet(data["dns_ips"].(*schema.Set)),
+		Password:   aws.String(data["password"].(string)),
+		UserName:   aws.String(data["username"].(string)),
+	}
+
+	if v, ok := data["file_system_administrators_group"]; ok && v.(string) != "" {
+		req.FileSystemAdministratorsGroup = aws.String(v.(string))
+	}
+
+	if v, ok := data["organizational_unit_distinguished_name"]; ok && v.(string) != "" {
+		req.OrganizationalUnitDistinguishedName = aws.String(v.(string))
+	}
+
+	return req
+}
+
+func expandFsxSelfManagedActiveDirectoryConfigurationUpdate(l []interface{}) *fsx.SelfManagedActiveDirectoryConfigurationUpdates {
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+
+	data := l[0].(map[string]interface{})
+	req := &fsx.SelfManagedActiveDirectoryConfigurationUpdates{
+		// dns_ips is a schema.TypeSet in the schema, so expand it as a set here as well.
+		DnsIps:   expandStringSet(data["dns_ips"].(*schema.Set)),
+		Password: aws.String(data["password"].(string)),
+		UserName: 
aws.String(data["username"].(string)), + } + + return req +} + +func flattenFsxSelfManagedActiveDirectoryConfiguration(d *schema.ResourceData, adopts *fsx.SelfManagedActiveDirectoryAttributes) []map[string]interface{} { + if adopts == nil { + return []map[string]interface{}{} + } + + // Since we are in a configuration block and the FSx API does not return + // the password, we need to set the value if we can or Terraform will + // show a difference for the argument from empty string to the value. + // This is not a pattern that should be used normally. + // See also: flattenEmrKerberosAttributes + + m := map[string]interface{}{ + "dns_ips": aws.StringValueSlice(adopts.DnsIps), + "domain_name": aws.StringValue(adopts.DomainName), + "file_system_administrators_group": aws.StringValue(adopts.FileSystemAdministratorsGroup), + "organizational_unit_distinguished_name": aws.StringValue(adopts.OrganizationalUnitDistinguishedName), + "password": d.Get("self_managed_active_directory.0.password").(string), + "username": aws.StringValue(adopts.UserName), + } + + return []map[string]interface{}{m} +} diff --git a/aws/resource_aws_fsx_windows_file_system_test.go b/aws/resource_aws_fsx_windows_file_system_test.go new file mode 100644 index 00000000000..c617f427d21 --- /dev/null +++ b/aws/resource_aws_fsx_windows_file_system_test.go @@ -0,0 +1,905 @@ +package aws + +import ( + "fmt" + "log" + "regexp" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func init() { + resource.AddTestSweepers("aws_fsx_windows_file_system", &resource.Sweeper{ + Name: "aws_fsx_windows_file_system", + F: testSweepFSXWindowsFileSystems, + }) +} + +func testSweepFSXWindowsFileSystems(region string) error { + client, err := sharedClientForRegion(region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.(*AWSClient).fsxconn + input := &fsx.DescribeFileSystemsInput{} + + err = conn.DescribeFileSystemsPages(input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { + for _, fs := range page.FileSystems { + if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeWindows { + continue + } + + input := &fsx.DeleteFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemId: fs.FileSystemId, + WindowsConfiguration: &fsx.DeleteFileSystemWindowsConfiguration{ + SkipFinalBackup: aws.Bool(true), + }, + } + + log.Printf("[INFO] Deleting FSx windows filesystem: %s", aws.StringValue(fs.FileSystemId)) + _, err := conn.DeleteFileSystem(input) + + if err != nil { + log.Printf("[ERROR] Error deleting FSx filesystem: %s", err) + continue + } + + if err := waitForFsxFileSystemDeletion(conn, aws.StringValue(fs.FileSystemId), 30*time.Minute); err != nil { + log.Printf("[ERROR] Error waiting for filesystem (%s) to delete: %s", aws.StringValue(fs.FileSystemId), err) + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping FSx Windows Filesystem sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing FSx Windows Filesystems: %s", err) + } + + return nil + +} + +func TestAccAWSFsxWindowsFileSystem_basic(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + 
CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigSubnetIds1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`file-system/fs-.+`)), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "7"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), + resource.TestMatchResourceAttr(resourceName, "daily_automatic_backup_start_time", regexp.MustCompile(`^\d\d:\d\d$`)), + resource.TestMatchResourceAttr(resourceName, "dns_name", regexp.MustCompile(`fs-.+\..+`)), + resource.TestMatchResourceAttr(resourceName, "kms_key_id", regexp.MustCompile(`^arn:`)), + resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "1"), + testAccCheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "self_managed_active_directory.#", "0"), + resource.TestCheckResourceAttr(resourceName, "skip_final_backup", "true"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "300"), + resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "8"), + resource.TestMatchResourceAttr(resourceName, "vpc_id", regexp.MustCompile(`^vpc-.+`)), + resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexp.MustCompile(`^\d:\d\d:\d\d$`)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_disappears(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigSubnetIds1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem), + testAccCheckFsxWindowsFileSystemDisappears(&filesystem), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_AutomaticBackupRetentionDays(t *testing.T) { + var filesystem1, filesystem2, filesystem3 fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigAutomaticBackupRetentionDays(35), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "35"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigAutomaticBackupRetentionDays(0), + Check: resource.ComposeTestCheckFunc( 
+ testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), + ), + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigAutomaticBackupRetentionDays(14), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem3), + testAccCheckFsxWindowsFileSystemNotRecreated(&filesystem2, &filesystem3), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "14"), + ), + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_CopyTagsToBackups(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigCopyTagsToBackups(true), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigCopyTagsToBackups(false), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), + ), + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_DailyAutomaticBackupStartTime(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigDailyAutomaticBackupStartTime("01:01"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", "01:01"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigDailyAutomaticBackupStartTime("02:02"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", "02:02"), + ), + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_KmsKeyId(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + kmsKeyResourceName1 := "aws_kms_key.test1" + kmsKeyResourceName2 := "aws_kms_key.test2" + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + 
Config: testAccAwsFsxWindowsFileSystemConfigKmsKeyId1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName1, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigKmsKeyId2(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", kmsKeyResourceName2, "arn"), + ), + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_SecurityGroupIds(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigSecurityGroupIds1(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigSecurityGroupIds2(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "2"), + ), + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_SelfManagedActiveDirectory(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigSelfManagedActiveDirectory(), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "self_managed_active_directory.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "self_managed_active_directory", + "skip_final_backup", + }, + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_StorageCapacity(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigStorageCapacity(301), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "301"), + ), + }, + { + ResourceName: resourceName, 
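+				// security_group_ids is not returned by the FSx API and skip_final_backup
+				// exists only in Terraform, so both are ignored during import verification below.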
+ ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigStorageCapacity(302), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "302"), + ), + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_Tags(t *testing.T) { + var filesystem1, filesystem2, filesystem3 fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigTags1("key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigTags2("key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigTags1("key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem3), + testAccCheckFsxWindowsFileSystemNotRecreated(&filesystem2, &filesystem3), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccAWSFsxWindowsFileSystem_ThroughputCapacity(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigThroughputCapacity(16), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "16"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigThroughputCapacity(32), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "32"), + ), + }, + }, + }) +} + +func 
TestAccAWSFsxWindowsFileSystem_WeeklyMaintenanceStartTime(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_windows_file_system.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFsxWindowsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsFsxWindowsFileSystemConfigWeeklyMaintenanceStartTime("1:01:01"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time", "1:01:01"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "security_group_ids", + "skip_final_backup", + }, + }, + { + Config: testAccAwsFsxWindowsFileSystemConfigWeeklyMaintenanceStartTime("2:02:02"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxWindowsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxWindowsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time", "2:02:02"), + ), + }, + }, + }) +} + +func testAccCheckFsxWindowsFileSystemExists(resourceName string, fs *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + conn := testAccProvider.Meta().(*AWSClient).fsxconn + + filesystem, err := describeFsxFileSystem(conn, rs.Primary.ID) + + if err != nil { + return err + } + + if filesystem == nil { + return fmt.Errorf("FSx File System (%s) not found", rs.Primary.ID) + } + + *fs = *filesystem + + return nil + } +} + +func testAccCheckFsxWindowsFileSystemDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).fsxconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fsx_windows_file_system" { + continue + } + + filesystem, err := describeFsxFileSystem(conn, rs.Primary.ID) + + if isAWSErr(err, fsx.ErrCodeFileSystemNotFound, "") { + continue + } + + if err != nil { + return err + } + + if filesystem != nil { + return fmt.Errorf("FSx File System (%s) still exists", rs.Primary.ID) + } + } + return nil +} + +func testAccCheckFsxWindowsFileSystemDisappears(filesystem *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).fsxconn + + input := &fsx.DeleteFileSystemInput{ + FileSystemId: filesystem.FileSystemId, + } + + _, err := conn.DeleteFileSystem(input) + + if err != nil { + return err + } + + return waitForFsxFileSystemDeletion(conn, aws.StringValue(filesystem.FileSystemId), 30*time.Minute) + } +} + +func testAccCheckFsxWindowsFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { + return fmt.Errorf("FSx File System (%s) recreated", aws.StringValue(i.FileSystemId)) + } + + return nil + } +} + +func testAccCheckFsxWindowsFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { + return fmt.Errorf("FSx File System (%s) not recreated", aws.StringValue(i.FileSystemId)) + } + + return nil + } +} + +func testAccAwsFsxWindowsFileSystemConfigBase() 
string { + return fmt.Sprintf(` +data "aws_availability_zones" "available" { + state = "available" +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test1" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "10.0.1.0/24" + availability_zone = "${data.aws_availability_zones.available.names[0]}" +} + +resource "aws_subnet" "test2" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "10.0.2.0/24" + availability_zone = "${data.aws_availability_zones.available.names[1]}" +} + +resource "aws_directory_service_directory" "test" { + edition = "Standard" + name = "corp.notexample.com" + password = "SuperSecretPassw0rd" + type = "MicrosoftAD" + + vpc_settings { + subnet_ids = ["${aws_subnet.test1.id}", "${aws_subnet.test2.id}"] + vpc_id = "${aws_vpc.test.id}" + } +} +`) +} + +func testAccAwsFsxWindowsFileSystemConfigAutomaticBackupRetentionDays(automaticBackupRetentionDays int) string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + automatic_backup_retention_days = %[1]d + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`, automaticBackupRetentionDays) +} + +func testAccAwsFsxWindowsFileSystemConfigCopyTagsToBackups(copyTagsToBackups bool) string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + copy_tags_to_backups = %[1]t + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`, copyTagsToBackups) +} + +func testAccAwsFsxWindowsFileSystemConfigDailyAutomaticBackupStartTime(dailyAutomaticBackupStartTime string) string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + daily_automatic_backup_start_time = %[1]q + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`, dailyAutomaticBackupStartTime) +} + +func testAccAwsFsxWindowsFileSystemConfigKmsKeyId1() string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_kms_key" "test1" { + description = "FSx KMS Testing key" + deletion_window_in_days = 7 +} + +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + kms_key_id = "${aws_kms_key.test1.arn}" + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`) +} + +func testAccAwsFsxWindowsFileSystemConfigKmsKeyId2() string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_kms_key" "test2" { + description = "FSx KMS Testing key" + deletion_window_in_days = 7 +} + +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + kms_key_id = "${aws_kms_key.test2.arn}" + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`) +} + +func testAccAwsFsxWindowsFileSystemConfigSecurityGroupIds1() string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_security_group" "test1" { + description = 
"security group for FSx testing" + vpc_id = "${aws_vpc.test.id}" + + ingress { + cidr_blocks = ["${aws_vpc.test.cidr_block}"] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } +} + +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + security_group_ids = ["${aws_security_group.test1.id}"] + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`) +} + +func testAccAwsFsxWindowsFileSystemConfigSecurityGroupIds2() string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_security_group" "test1" { + description = "security group for FSx testing" + vpc_id = "${aws_vpc.test.id}" + + ingress { + cidr_blocks = ["${aws_vpc.test.cidr_block}"] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } +} + +resource "aws_security_group" "test2" { + description = "security group for FSx testing" + vpc_id = "${aws_vpc.test.id}" + + ingress { + cidr_blocks = ["${aws_vpc.test.cidr_block}"] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } +} + +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + security_group_ids = ["${aws_security_group.test1.id}", "${aws_security_group.test2.id}"] + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`) +} + +func testAccAwsFsxWindowsFileSystemConfigSelfManagedActiveDirectory() string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 + + self_managed_active_directory { + dns_ips = aws_directory_service_directory.test.dns_ip_addresses + domain_name = aws_directory_service_directory.test.name + password = aws_directory_service_directory.test.password + username = "Admin" + } +} +`) +} + +func testAccAwsFsxWindowsFileSystemConfigStorageCapacity(storageCapacity int) string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + skip_final_backup = true + storage_capacity = %[1]d + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`, storageCapacity) +} + +func testAccAwsFsxWindowsFileSystemConfigSubnetIds1() string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 +} +`) +} + +func testAccAwsFsxWindowsFileSystemConfigTags1(tagKey1, tagValue1 string) string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 + + tags = { + %[1]q = %[2]q + } +} +`, tagKey1, 
tagValue1) +} + +func testAccAwsFsxWindowsFileSystemConfigTags2(tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 + + tags = { + %[1]q = %[2]q + %[3]q = %[4]q + } +} +`, tagKey1, tagValue1, tagKey2, tagValue2) +} + +func testAccAwsFsxWindowsFileSystemConfigThroughputCapacity(throughputCapacity int) string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = %[1]d +} +`, throughputCapacity) +} + +func testAccAwsFsxWindowsFileSystemConfigWeeklyMaintenanceStartTime(weeklyMaintenanceStartTime string) string { + return testAccAwsFsxWindowsFileSystemConfigBase() + fmt.Sprintf(` +resource "aws_fsx_windows_file_system" "test" { + active_directory_id = "${aws_directory_service_directory.test.id}" + skip_final_backup = true + storage_capacity = 300 + subnet_ids = ["${aws_subnet.test1.id}"] + throughput_capacity = 8 + weekly_maintenance_start_time = %[1]q +} +`, weeklyMaintenanceStartTime) +} diff --git a/aws/resource_aws_subnet_test.go b/aws/resource_aws_subnet_test.go index 0b2a87b73e0..e0c89109eab 100644 --- a/aws/resource_aws_subnet_test.go +++ b/aws/resource_aws_subnet_test.go @@ -33,6 +33,8 @@ func init() { "aws_elasticsearch_domain", "aws_elb", "aws_emr_cluster", + "aws_fsx_lustre_file_system", + "aws_fsx_windows_file_system", "aws_lambda_function", "aws_lb", "aws_mq_broker", diff --git a/aws/tagsFSX.go b/aws/tagsFSX.go new file mode 100644 index 00000000000..213b84653df --- /dev/null +++ b/aws/tagsFSX.go @@ -0,0 +1,120 @@ +package aws + +import ( + "log" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. It expects the +// tags field to be named "tags". It also expects to take the resource +// ARN as the primary ID based on the requirements of the FSx API (as +// opposed to the resource ID like other tagging helpers). +func setTagsFSX(conn *fsx.FSx, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsFSX(tagsFromMapFSX(o), tagsFromMapFSX(n)) + + // Set tags + if len(remove) > 0 { + log.Printf("[DEBUG] Removing tags: %#v", remove) + k := make([]*string, 0, len(remove)) + for _, t := range remove { + k = append(k, t.Key) + } + _, err := conn.UntagResource(&fsx.UntagResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), + TagKeys: k, + }) + if err != nil { + return err + } + } + if len(create) > 0 { + log.Printf("[DEBUG] Creating tags: %#v", create) + _, err := conn.TagResource(&fsx.TagResourceInput{ + ResourceARN: aws.String(d.Get("arn").(string)), + Tags: create, + }) + if err != nil { + return err + } + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. 
+func diffTagsFSX(oldTags, newTags []*fsx.Tag) ([]*fsx.Tag, []*fsx.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []*fsx.Tag + for _, t := range oldTags { + old, ok := create[aws.StringValue(t.Key)] + if !ok || old != aws.StringValue(t.Value) { + // Delete it! + remove = append(remove, t) + } else if ok { + // already present so remove from new + delete(create, aws.StringValue(t.Key)) + } + } + + return tagsFromMapFSX(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapFSX(m map[string]interface{}) []*fsx.Tag { + var result []*fsx.Tag + for k, v := range m { + t := &fsx.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + } + if !tagIgnoredFSX(t) { + result = append(result, t) + } + } + + return result +} + +// tagsToMap turns the list of tags into a map. +func tagsToMapFSX(ts []*fsx.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + if !tagIgnoredFSX(t) { + result[*t.Key] = *t.Value + } + } + + return result +} + +// compare a tag against a list of strings and checks if it should +// be ignored or not +func tagIgnoredFSX(t *fsx.Tag) bool { + filter := []string{"^aws:"} + for _, v := range filter { + log.Printf("[DEBUG] Matching %v with %v\n", v, *t.Key) + r, _ := regexp.MatchString(v, *t.Key) + if r { + log.Printf("[DEBUG] Found AWS specific tag %s (val: %s), ignoring.\n", *t.Key, *t.Value) + return true + } + } + return false +} diff --git a/aws/tagsFSX_test.go b/aws/tagsFSX_test.go new file mode 100644 index 00000000000..84cacd1c84c --- /dev/null +++ b/aws/tagsFSX_test.go @@ -0,0 +1,110 @@ +package aws + +import ( + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" +) + +func TestDiffFSXTags(t *testing.T) { + cases := []struct { + Old, New map[string]interface{} + Create, Remove map[string]string + }{ + // Basic add/remove + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "bar": "baz", + }, + Create: map[string]string{ + "bar": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + + // Modify + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "foo": "baz", + }, + Create: map[string]string{ + "foo": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + + // Overlap + { + Old: map[string]interface{}{ + "foo": "bar", + "hello": "world", + }, + New: map[string]interface{}{ + "foo": "baz", + "hello": "world", + }, + Create: map[string]string{ + "foo": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + + // Remove + { + Old: map[string]interface{}{ + "foo": "bar", + "bar": "baz", + }, + New: map[string]interface{}{ + "foo": "bar", + }, + Create: map[string]string{}, + Remove: map[string]string{ + "bar": "baz", + }, + }, + } + + for i, tc := range cases { + c, r := diffTagsFSX(tagsFromMapFSX(tc.Old), tagsFromMapFSX(tc.New)) + cm := tagsToMapFSX(c) + rm := tagsToMapFSX(r) + if !reflect.DeepEqual(cm, tc.Create) { + t.Fatalf("%d: bad create: %#v", i, cm) + } + if !reflect.DeepEqual(rm, tc.Remove) { + t.Fatalf("%d: bad remove: %#v", i, rm) + } + } +} + +func TestIgnoringTagsFSX(t *testing.T) { + var ignoredTags []*fsx.Tag + ignoredTags = append(ignoredTags, &fsx.Tag{ + Key: aws.String("aws:cloudformation:logical-id"), + Value: aws.String("foo"), + }) + ignoredTags = 
append(ignoredTags, &fsx.Tag{ + Key: aws.String("aws:foo:bar"), + Value: aws.String("baz"), + }) + for _, tag := range ignoredTags { + if !tagIgnoredFSX(tag) { + t.Fatalf("Tag %v with value %v not ignored, but should be!", *tag.Key, *tag.Value) + } + } +} diff --git a/website/aws.erb b/website/aws.erb index f8ae727771d..ed8d10ca4e9 100644 --- a/website/aws.erb +++ b/website/aws.erb @@ -1477,6 +1477,22 @@ +
  [website/aws.erb hunk rendered as page residue; recoverable content: a new "File System (FSx)" entry is added to the resource sidebar navigation, immediately ahead of the existing "Gamelift" entry]
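
Not part of this changeset: a minimal sketch of how a resource Update function could wire in the setTagsFSX helper defined above. The function name resourceAwsFsxExampleUpdate is hypothetical, and the sketch assumes it lives in the provider's aws package alongside the helper, with the resource exposing its ARN under the "arn" attribute as the helper's doc comment requires.

// Hypothetical sketch, not part of this diff: applying tag changes via setTagsFSX.
func resourceAwsFsxExampleUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).fsxconn

	// setTagsFSX only issues TagResource/UntagResource calls when d.HasChange("tags"),
	// and it reads the "arn" attribute itself, so the caller passes only conn and d.
	if err := setTagsFSX(conn, d); err != nil {
		return fmt.Errorf("error updating FSx file system tags: %s", err)
	}

	// A real resource would re-read remote state here before returning.
	return nil
}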
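Likewise illustrative only: a sketch of how a Read function could surface remote tags into state through tagsToMapFSX. The function name readFsxTagsExample is hypothetical; the sketch assumes the FSx ListTagsForResource API (which takes the resource ARN, matching the TagResource/UntagResource calls above) and skips NextToken pagination for brevity.

// Hypothetical sketch, not part of this diff: reading remote tags into state.
func readFsxTagsExample(conn *fsx.FSx, d *schema.ResourceData) error {
	resp, err := conn.ListTagsForResource(&fsx.ListTagsForResourceInput{
		ResourceARN: aws.String(d.Get("arn").(string)),
	})
	if err != nil {
		return fmt.Errorf("error listing FSx file system tags: %s", err)
	}

	// tagsToMapFSX drops AWS-internal "aws:" keys before they reach state.
	if err := d.Set("tags", tagsToMapFSX(resp.Tags)); err != nil {
		return fmt.Errorf("error setting tags: %s", err)
	}

	return nil
}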