From 16f51b2f9033742653f5462224b3afcfd9e954e8 Mon Sep 17 00:00:00 2001 From: jackofallops Date: Fri, 31 Jan 2020 17:04:07 +0000 Subject: [PATCH 1/2] Updated managed_disk resource to properly manage attached VM for update to size --- .../compute/resource_arm_managed_disk.go | 205 +++++++++++++++++- .../tests/resource_arm_managed_disk_test.go | 113 ++++++++++ website/docs/r/managed_disk.html.markdown | 6 +- 3 files changed, 312 insertions(+), 12 deletions(-) diff --git a/azurerm/internal/services/compute/resource_arm_managed_disk.go b/azurerm/internal/services/compute/resource_arm_managed_disk.go index 8181f6bb66de..b82877a6b354 100644 --- a/azurerm/internal/services/compute/resource_arm_managed_disk.go +++ b/azurerm/internal/services/compute/resource_arm_managed_disk.go @@ -24,7 +24,7 @@ func resourceArmManagedDisk() *schema.Resource { return &schema.Resource{ Create: resourceArmManagedDiskCreateUpdate, Read: resourceArmManagedDiskRead, - Update: resourceArmManagedDiskCreateUpdate, + Update: resourceArmManagedDiskUpdate, Delete: resourceArmManagedDiskDelete, Importer: &schema.ResourceImporter{ @@ -94,6 +94,7 @@ func resourceArmManagedDisk() *schema.Resource { "storage_account_id": { Type: schema.TypeString, Optional: true, + ForceNew: true, // Not supported by disk update ValidateFunc: azure.ValidateResourceID, }, @@ -160,13 +161,13 @@ func resourceArmManagedDiskCreateUpdate(d *schema.ResourceData, meta interface{} log.Printf("[INFO] preparing arguments for Azure ARM Managed Disk creation.") name := d.Get("name").(string) - resGroup := d.Get("resource_group_name").(string) + resourceGroup := d.Get("resource_group_name").(string) if features.ShouldResourcesBeImported() && d.IsNewResource() { - existing, err := client.Get(ctx, resGroup, name) + existing, err := client.Get(ctx, resourceGroup, name) if err != nil { if !utils.ResponseWasNotFound(existing.Response) { - return fmt.Errorf("Error checking for presence of existing Managed Disk %q (Resource Group %q): %s", name, resGroup, err) + return fmt.Errorf("Error checking for presence of existing Managed Disk %q (Resource Group %q): %s", name, resourceGroup, err) } } @@ -281,21 +282,21 @@ func resourceArmManagedDiskCreateUpdate(d *schema.ResourceData, meta interface{} Zones: zones, } - future, err := client.CreateOrUpdate(ctx, resGroup, name, createDisk) + future, err := client.CreateOrUpdate(ctx, resourceGroup, name, createDisk) if err != nil { - return fmt.Errorf("Error creating/updating Managed Disk %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error creating/updating Managed Disk %q (Resource Group %q): %+v", name, resourceGroup, err) } if err = future.WaitForCompletionRef(ctx, client.Client); err != nil { - return fmt.Errorf("Error waiting for create/update of Managed Disk %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error waiting for create/update of Managed Disk %q (Resource Group %q): %+v", name, resourceGroup, err) } - read, err := client.Get(ctx, resGroup, name) + read, err := client.Get(ctx, resourceGroup, name) if err != nil { - return fmt.Errorf("Error retrieving Managed Disk %q (Resource Group %q): %+v", name, resGroup, err) + return fmt.Errorf("Error retrieving Managed Disk %q (Resource Group %q): %+v", name, resourceGroup, err) } if read.ID == nil { - return fmt.Errorf("Error reading Managed Disk %s (Resource Group %q): ID was nil", name, resGroup) + return fmt.Errorf("Error reading Managed Disk %s (Resource Group %q): ID was nil", name, resourceGroup) } d.SetId(*read.ID) @@ 
-303,6 +304,190 @@ func resourceArmManagedDiskCreateUpdate(d *schema.ResourceData, meta interface{}
 	return resourceArmManagedDiskRead(d, meta)
 }
 
+func resourceArmManagedDiskUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*clients.Client).Compute.DisksClient
+	ctx, cancel := timeouts.ForUpdate(meta.(*clients.Client).StopContext, d)
+	defer cancel()
+
+	log.Printf("[INFO] preparing arguments for Azure ARM Managed Disk update.")
+
+	name := d.Get("name").(string)
+	resourceGroup := d.Get("resource_group_name").(string)
+	storageAccountType := d.Get("storage_account_type").(string)
+
+	disk, err := client.Get(ctx, resourceGroup, name)
+	if err != nil {
+		if utils.ResponseWasNotFound(disk.Response) {
+			return fmt.Errorf("Error Managed Disk %q (Resource Group %q) was not found", name, resourceGroup)
+		}
+
+		return fmt.Errorf("Error making Read request on Azure Managed Disk %q (Resource Group %q): %+v", name, resourceGroup, err)
+	}
+
+	diskUpdate := compute.DiskUpdate{
+		DiskUpdateProperties: &compute.DiskUpdateProperties{},
+	}
+
+	if d.HasChange("tags") {
+		t := d.Get("tags").(map[string]interface{})
+		diskUpdate.Tags = tags.Expand(t)
+	}
+
+	if d.HasChange("storage_account_type") {
+		var skuName compute.DiskStorageAccountTypes
+		for _, v := range compute.PossibleDiskStorageAccountTypesValues() {
+			if strings.EqualFold(storageAccountType, string(v)) {
+				skuName = v
+			}
+		}
+		diskUpdate.Sku = &compute.DiskSku{
+			Name: skuName,
+		}
+	}
+
+	if strings.EqualFold(storageAccountType, string(compute.UltraSSDLRS)) {
+		if d.HasChange("disk_iops_read_write") {
+			v := d.Get("disk_iops_read_write")
+			diskIOPS := int64(v.(int))
+			diskUpdate.DiskIOPSReadWrite = &diskIOPS
+		}
+
+		if d.HasChange("disk_mbps_read_write") {
+			v := d.Get("disk_mbps_read_write")
+			diskMBps := int32(v.(int))
+			diskUpdate.DiskMBpsReadWrite = &diskMBps
+		}
+	} else {
+		if d.HasChange("disk_iops_read_write") || d.HasChange("disk_mbps_read_write") {
+			return fmt.Errorf("[ERROR] disk_iops_read_write and disk_mbps_read_write are only available for UltraSSD disks")
+		}
+	}
+
+	if d.HasChange("os_type") {
+		diskUpdate.DiskUpdateProperties.OsType = compute.OperatingSystemTypes(d.Get("os_type").(string))
+	}
+
+	if d.HasChange("disk_size_gb") {
+		if old, new := d.GetChange("disk_size_gb"); new.(int) > old.(int) {
+			diskUpdate.DiskUpdateProperties.DiskSizeGB = utils.Int32(int32(new.(int)))
+		} else {
+			return fmt.Errorf("Error - New size must be greater than original size. Shrinking disks is not supported on Azure")
+		}
+	}
+
+	// if we are attached to a VM we bring down the VM as necessary for the operations which are not allowed while it's online
+	if disk.ManagedBy != nil {
+		virtualMachine, err := ParseVirtualMachineID(*disk.ManagedBy)
+		if err != nil {
+			return fmt.Errorf("Error parsing VMID %q for disk attachment: %+v", *disk.ManagedBy, err)
+		}
+		// check instanceView State
+		vmClient := meta.(*clients.Client).Compute.VMClient
+
+		instanceView, err := vmClient.InstanceView(ctx, virtualMachine.ResourceGroup, virtualMachine.Name)
+		if err != nil {
+			return fmt.Errorf("Error retrieving InstanceView for Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err)
+		}
+
+		shouldTurnBackOn := true
+		shouldShutDown := true
+		shouldDeallocate := true
+
+		if instanceView.Statuses != nil {
+			for _, status := range *instanceView.Statuses {
+				if status.Code == nil {
+					continue
+				}
+
+				// could also be the provisioning state which we're not bothered with here
+				state := strings.ToLower(*status.Code)
+				if !strings.HasPrefix(state, "powerstate/") {
+					continue
+				}
+
+				state = strings.TrimPrefix(state, "powerstate/")
+				switch strings.ToLower(state) {
+				case "deallocated":
+				case "deallocating":
+					shouldTurnBackOn = false
+					shouldShutDown = false
+					shouldDeallocate = false
+				case "stopping":
+				case "stopped":
+					shouldShutDown = false
+					shouldTurnBackOn = false
+				}
+			}
+		}
+
+		// Shutdown
+		if shouldShutDown {
+			log.Printf("[DEBUG] Shutting Down Virtual Machine %q (Resource Group %q)..", virtualMachine.Name, virtualMachine.ResourceGroup)
+			forceShutdown := false
+			future, err := vmClient.PowerOff(ctx, virtualMachine.ResourceGroup, virtualMachine.Name, utils.Bool(forceShutdown))
+			if err != nil {
+				return fmt.Errorf("Error sending Power Off to Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err)
+			}
+
+			if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+				return fmt.Errorf("Error waiting for Power Off of Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err)
+			}
+
+			log.Printf("[DEBUG] Shut Down Virtual Machine %q (Resource Group %q)..", virtualMachine.Name, virtualMachine.ResourceGroup)
+		}
+
+		// De-allocate
+		if shouldDeallocate {
+			log.Printf("[DEBUG] Deallocating Virtual Machine %q (Resource Group %q)..", virtualMachine.Name, virtualMachine.ResourceGroup)
+			deAllocFuture, err := vmClient.Deallocate(ctx, virtualMachine.ResourceGroup, virtualMachine.Name)
+			if err != nil {
+				return fmt.Errorf("Error Deallocating Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err)
+			}
+
+			if err := deAllocFuture.WaitForCompletionRef(ctx, client.Client); err != nil {
+				return fmt.Errorf("Error waiting for Deallocation of Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err)
+			}
+
+			log.Printf("[DEBUG] Deallocated Virtual Machine %q (Resource Group %q)..", virtualMachine.Name, virtualMachine.ResourceGroup)
+		}
+
+		// Update Disk
+		updateFuture, err := client.Update(ctx, resourceGroup, name, diskUpdate)
+		if err != nil {
+			return fmt.Errorf("Error updating Managed Disk %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+		if err := updateFuture.WaitForCompletionRef(ctx, client.Client); err != nil {
+			return fmt.Errorf("Error waiting for update of Managed Disk %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+
+		if shouldTurnBackOn {
+			log.Printf("[DEBUG] Starting Virtual Machine %q (Resource Group %q)..", virtualMachine.Name, virtualMachine.ResourceGroup)
+			future, err := vmClient.Start(ctx, virtualMachine.ResourceGroup, virtualMachine.Name)
+			if err != nil {
+				return fmt.Errorf("Error starting Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err)
+			}
+
+			if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+				return fmt.Errorf("Error waiting for start of Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err)
+			}
+
+			log.Printf("[DEBUG] Started Virtual Machine %q (Resource Group %q)..", virtualMachine.Name, virtualMachine.ResourceGroup)
+		}
+	} else { // otherwise, just update it
+		diskFuture, err := client.Update(ctx, resourceGroup, name, diskUpdate)
+		if err != nil {
+			return fmt.Errorf("Error updating managed disk %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+
+		err = diskFuture.WaitForCompletionRef(ctx, client.Client)
+		if err != nil {
+			return fmt.Errorf("Error waiting for update of managed disk %q (Resource Group %q): %+v", name, resourceGroup, err)
+		}
+	}
+
+	return resourceArmManagedDiskRead(d, meta)
+}
+
 func resourceArmManagedDiskRead(d *schema.ResourceData, meta interface{}) error {
 	client := meta.(*clients.Client).Compute.DisksClient
 	ctx, cancel := timeouts.ForRead(meta.(*clients.Client).StopContext, d)
diff --git a/azurerm/internal/services/compute/tests/resource_arm_managed_disk_test.go b/azurerm/internal/services/compute/tests/resource_arm_managed_disk_test.go
index 0ad4429b476e..8ff810035dcb 100644
--- a/azurerm/internal/services/compute/tests/resource_arm_managed_disk_test.go
+++ b/azurerm/internal/services/compute/tests/resource_arm_managed_disk_test.go
@@ -358,6 +358,36 @@ func TestAccAzureRMManagedDisk_diskEncryptionSet(t *testing.T) {
 	})
 }
 
+func TestAccAzureRMManagedDisk_attachedDiskUpdate(t *testing.T) {
+	data := acceptance.BuildTestData(t, "azurerm_managed_disk", "test")
+	var d compute.Disk
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acceptance.PreCheck(t) },
+		Providers:    acceptance.SupportedProviders,
+		CheckDestroy: testCheckAzureRMManagedDiskDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAzureRMManagedDisk_managedDiskAttached(data, 10),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMManagedDiskExists(data.ResourceName, &d, true),
+				),
+			},
+			data.ImportStep(),
+			{
+				Config: testAccAzureRMManagedDisk_managedDiskAttached(data, 20),
+				Check: resource.ComposeTestCheckFunc(
+					testCheckAzureRMManagedDiskExists(data.ResourceName, &d, true),
+					resource.TestCheckResourceAttr(data.ResourceName, "disk_size_gb", "20"),
+				),
+			},
+			data.ImportStep(),
+		},
+	})
+}
+
+// TODO: More property update tests?
+ func testCheckAzureRMManagedDiskExists(resourceName string, d *compute.Disk, shouldExist bool) resource.TestCheckFunc { return func(s *terraform.State) error { client := acceptance.AzureProvider.Meta().(*clients.Client).Compute.DisksClient @@ -972,3 +1002,86 @@ resource "azurerm_managed_disk" "test" { } `, template, data.RandomInteger, data.RandomInteger) } + +func testAccAzureRMManagedDisk_managedDiskAttached(data acceptance.TestData, diskSize int) string { + return fmt.Sprintf(` +resource "azurerm_resource_group" "test" { + name = "acctestRG-%[1]d" + location = "%[2]s" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%[1]d" + address_space = ["10.0.0.0/16"] + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%[1]d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_network_interface" "test" { + name = "acctni-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "Dynamic" + } +} + +resource "azurerm_virtual_machine" "test" { + name = "acctvm-%[1]d" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + network_interface_ids = ["${azurerm_network_interface.test.id}"] + vm_size = "Standard_F2" + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "16.04-LTS" + version = "latest" + } + + storage_os_disk { + name = "myosdisk1" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + + os_profile { + computer_name = "hn%[1]d" + admin_username = "testadmin" + admin_password = "Password1234!" + } + + os_profile_linux_config { + disable_password_authentication = false + } +} + +resource "azurerm_managed_disk" "test" { + name = "%[1]d-disk1" + location = "${azurerm_resource_group.test.location}" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_type = "Standard_LRS" + create_option = "Empty" + disk_size_gb = %[3]d +} + +resource "azurerm_virtual_machine_data_disk_attachment" "test" { + managed_disk_id = "${azurerm_managed_disk.test.id}" + virtual_machine_id = "${azurerm_virtual_machine.test.id}" + lun = "0" + caching = "None" +} +`, data.RandomInteger, data.Locations.Primary, diskSize) +} diff --git a/website/docs/r/managed_disk.html.markdown b/website/docs/r/managed_disk.html.markdown index 9dcd2a090178..a2d22b5186d0 100644 --- a/website/docs/r/managed_disk.html.markdown +++ b/website/docs/r/managed_disk.html.markdown @@ -82,7 +82,7 @@ The following arguments are supported: -> **Note**: A `storage_account_type` of type `UltraSSD_LRS` and the arguments `disk_iops_read_write` and `disk_mbps_read_write` are currently in private preview and are not available to subscriptions that have not requested onboarding to `Azure Ultra Disk Storage` private preview. `Azure Ultra Disk Storage` is only available in `East US 2`, `North Europe`, and `Southeast Asia` regions. 
For more information see the `Azure Ultra Disk Storage` [product documentation](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-enable-ultra-ssd), [product blog](https://azure.microsoft.com/en-us/blog/announcing-the-general-availability-of-azure-ultra-disk-storage/) and [FAQ](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/faq-for-disks#ultra-disks). -* `create_option` - (Required) The method to use when creating the managed disk. Possible values include: +* `create_option` - (Required) The method to use when creating the managed disk. Changing this forces a new resource to be created. Possible values include: * `Import` - Import a VHD file in to the managed disk (VHD specified with `source_uri`). * `Empty` - Create an empty managed disk. * `Copy` - Copy an existing managed disk or snapshot (specified with `source_resource_id`). @@ -103,6 +103,8 @@ The following arguments are supported: * `disk_size_gb` - (Optional, Required for a new managed disk) Specifies the size of the managed disk to create in gigabytes. If `create_option` is `Copy` or `FromImage`, then the value must be equal to or greater than the source's size. +~> **NOTE:** Changing this value is disruptive if the disk is attached to a Virtual Machine. The VM will be shut down and de-allocated as required by Azure to action the change. Terraform will attempt to start the machine again after the update if it was in a `running` state when the apply was started. + * `encryption_settings` - (Optional) A `encryption_settings` block as defined below. * `image_reference_id` - (Optional) ID of an existing platform/marketplace disk image to copy when `create_option` is `FromImage`. @@ -113,7 +115,7 @@ The following arguments are supported: * `source_uri` - (Optional) URI to a valid VHD file to be used when `create_option` is `Import`. -* `storage_account_id` - (Optional) The ID of the Storage Account where the `source_uri` is located. Required when `create_option` is set to `Import`. +* `storage_account_id` - (Optional) The ID of the Storage Account where the `source_uri` is located. Required when `create_option` is set to `Import`. Changing this forces a new resource to be created. * `tags` - (Optional) A mapping of tags to assign to the resource. 
From a2ea3f22e8aa9511c8849a9c292231a2b9a22316 Mon Sep 17 00:00:00 2001
From: jackofallops
Date: Mon, 3 Feb 2020 07:23:10 +0000
Subject: [PATCH 2/2] Added locking around VM resource for disk operations

---
 .../internal/services/compute/resource_arm_managed_disk.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/azurerm/internal/services/compute/resource_arm_managed_disk.go b/azurerm/internal/services/compute/resource_arm_managed_disk.go
index b82877a6b354..14069bbed748 100644
--- a/azurerm/internal/services/compute/resource_arm_managed_disk.go
+++ b/azurerm/internal/services/compute/resource_arm_managed_disk.go
@@ -15,6 +15,7 @@ import (
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/tf"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features"
+	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/locks"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts"
 	"github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
@@ -384,6 +385,9 @@ func resourceArmManagedDiskUpdate(d *schema.ResourceData, meta interface{}) erro
 		// check instanceView State
 		vmClient := meta.(*clients.Client).Compute.VMClient
 
+		locks.ByName(virtualMachine.Name, virtualMachineResourceName)
+		defer locks.UnlockByName(virtualMachine.Name, virtualMachineResourceName)
+
 		instanceView, err := vmClient.InstanceView(ctx, virtualMachine.ResourceGroup, virtualMachine.Name)
 		if err != nil {
 			return fmt.Errorf("Error retrieving InstanceView for Virtual Machine %q (Resource Group %q): %+v", virtualMachine.Name, virtualMachine.ResourceGroup, err)
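
Reviewer note (illustrative, not part of the patch): the update path decides whether the attached VM must be powered off and deallocated before the disk update, and whether it should be started again afterwards, by inspecting the `PowerState/<state>` codes returned in the VM's instance view. The standalone Go sketch below shows that classification in isolation; the helper name `classifyPowerState` and the sample status codes are assumptions for illustration, not provider code.

package main

import (
	"fmt"
	"strings"
)

// classifyPowerState mirrors the decision made in resourceArmManagedDiskUpdate:
// given the status codes from a VM's instance view, decide whether the VM still
// needs to be powered off and deallocated before the disk can be updated, and
// whether it should be started again once the update has finished.
func classifyPowerState(statusCodes []string) (shutDown, deallocate, turnBackOn bool) {
	shutDown, deallocate, turnBackOn = true, true, true

	for _, code := range statusCodes {
		// instance views also carry ProvisioningState/* codes, which are ignored here
		state := strings.ToLower(code)
		if !strings.HasPrefix(state, "powerstate/") {
			continue
		}

		switch strings.TrimPrefix(state, "powerstate/") {
		case "deallocated", "deallocating":
			// nothing to stop, and nothing to start afterwards
			shutDown, deallocate, turnBackOn = false, false, false
		case "stopped", "stopping":
			// already stopped, but still needs deallocating before the update
			shutDown, turnBackOn = false, false
		}
	}
	return
}

func main() {
	// sample codes in the shape the compute API returns them
	fmt.Println(classifyPowerState([]string{"ProvisioningState/succeeded", "PowerState/running"}))
	// prints: true true true

	fmt.Println(classifyPowerState([]string{"ProvisioningState/succeeded", "PowerState/deallocated"}))
	// prints: false false false
}

Any other power state (for example running or starting) leaves all three flags true, so the VM is stopped, deallocated, updated and then started again, which is the behaviour the docs note in this patch describes.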