diff --git a/civo/instances/datasource_instance.go b/civo/instances/datasource_instance.go
index 332412aa..fef25748 100644
--- a/civo/instances/datasource_instance.go
+++ b/civo/instances/datasource_instance.go
@@ -102,6 +102,20 @@ func DataSourceInstance() *schema.Resource {
 				Computed:    true,
 				Description: "An optional list of tags",
 			},
+			"attached_volume": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: "A list of volumes to attach at boot to the instance.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "The ID of the volume to attach.",
+						},
+					},
+				},
+			},
 			"script": {
 				Type:        schema.TypeString,
 				Computed:    true,
@@ -192,5 +206,17 @@ func dataSourceInstanceRead(_ context.Context, d *schema.ResourceData, m interfa
 	d.Set("created_at", foundImage.CreatedAt.UTC().String())
 	d.Set("notes", foundImage.Notes)

+	if len(foundImage.AttachedVolumes) > 0 {
+		volumes := make([]map[string]interface{}, 0, len(foundImage.AttachedVolumes))
+		for _, volume := range foundImage.AttachedVolumes {
+			volumeMap := map[string]interface{}{
+				"id": volume.ID,
+			}
+			volumes = append(volumes, volumeMap)
+		}
+
+		d.Set("attached_volume", volumes)
+	}
+
 	return nil
 }
diff --git a/civo/instances/resource_instance.go b/civo/instances/resource_instance.go
index 7d406489..ec10be75 100644
--- a/civo/instances/resource_instance.go
+++ b/civo/instances/resource_instance.go
@@ -20,7 +20,7 @@ import (
 )

 // ResourceInstance The instance resource represents an object of type instances
-// and with it you can handle the instances created with Terraform
+// and with it, you can handle the instances created with Terraform
 func ResourceInstance() *schema.Resource {
 	return &schema.Resource{
 		Description: "Provides a Civo instance resource. This can be used to create, modify, and delete instances.",
@@ -96,8 +96,9 @@ func ResourceInstance() *schema.Resource {
 				Description: "The ID of the firewall to use, from the current list. If left blank or not sent, the default firewall will be used (open to all)",
 			},
 			"volume_type": {
-				Type:        schema.TypeString,
-				Optional:    true,
+				Type:     schema.TypeString,
+				Optional: true,
+				//Default: "ms-xfs-2-replicas",
 				Description: "The type of volume to use, either 'ssd' or 'bssd' (optional; default 'ssd')",
 			},
 			"tags": {
@@ -106,6 +107,20 @@ func ResourceInstance() *schema.Resource {
 				Description: "An optional list of tags, represented as a key, value pair",
 				Elem:        &schema.Schema{Type: schema.TypeString},
 			},
+			"attached_volume": {
+				Type:        schema.TypeList,
+				Optional:    true,
+				Description: "A list of volumes to attach at boot to the instance.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "The ID of the volume to attach.",
+						},
+					},
+				},
+			},
 			"script": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -287,6 +302,18 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, m inter

 	config.Tags = tags

+	tfVolumeAttach := d.Get("attached_volume").([]interface{})
+	volumes := make([]civogo.AttachedVolume, 0, len(tfVolumeAttach))
+	for _, v := range tfVolumeAttach {
+		volumeData := v.(map[string]interface{})
+		volumes = append(volumes, civogo.AttachedVolume{
+			ID: volumeData["id"].(string),
+		})
+	}
+	if len(volumes) > 0 {
+		config.AttachedVolumes = volumes
+	}
+
 	log.Printf("[INFO] creating the instance %s", d.Get("hostname").(string))

 	instance, err := apiClient.CreateInstance(config)
@@ -295,8 +322,7 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, m inter
 		if parseErr == nil {
 			err = customErr
 		}
-		// quota errors introduce new line after each missing quota, causing formatting issues:
-		return diag.Errorf("[ERR] failed to create instance: %s", strings.ReplaceAll(err.Error(), "\n", " "))
+		return diag.Errorf("[ERR] failed to create instance: %s", err)
 	}

 	d.SetId(instance.ID)
@@ -375,6 +401,32 @@ func resourceInstanceRead(_ context.Context, d *schema.ResourceData, m interface
 		d.Set("initial_password", "")
 	}

+	if len(resp.AttachedVolumes) > 0 {
+		// Get the attached volumes from the API response
+		attachedVolumes := resp.AttachedVolumes
+
+		// Get the attached volumes from the Terraform state
+		tfAttachedVolumes := d.Get("attached_volume").([]interface{})
+
+		// Create a map of volumes listed in the Terraform config for comparison
+		configVolumeMap := make(map[string]bool)
+		for _, v := range tfAttachedVolumes {
+			volume := v.(map[string]interface{})
+			configVolumeMap[volume["id"].(string)] = true
+		}
+
+		// Filter out API volumes that are not in the Terraform config
+		var filteredVolumes []civogo.AttachedVolume
+		for _, vol := range attachedVolumes {
+			if _, exists := configVolumeMap[vol.ID]; exists {
+				filteredVolumes = append(filteredVolumes, vol)
+			}
+		}
+
+		// Set only the filtered volumes in the Terraform state
+		d.Set("attached_volume", filteredVolumes)
+	}
+
 	if resp.Script == "" {
 		d.Set("script", "")
 	}
@@ -486,6 +538,51 @@ func resourceInstanceUpdate(ctx context.Context, d *schema.ResourceData, m inter
 		}
 	}

+	if d.HasChange("attached_volume") {
+		oldVolumes, newVolumes := d.GetChange("attached_volume")
+		oldVolumeList := oldVolumes.([]interface{})
+		newVolumeList := newVolumes.([]interface{})
+
+		// Check if there are any new volumes being attached
+		for _, newVolume := range newVolumeList {
+			newVolumeData := newVolume.(map[string]interface{})
+			found := false
+			for _, oldVolume := range oldVolumeList {
+				oldVolumeData := oldVolume.(map[string]interface{})
+				if newVolumeData["id"] == oldVolumeData["id"] {
+					found = true
+					break
+				}
+			}
+			if !found {
+				// This is a new volume being attached, which is not allowed
+				return diag.Errorf("Attaching new volumes after instance creation is not allowed. Please create a new civo_volume_attachment resource for attaching additional volumes.")
+			}
+		}
+
+		// Handle volume detachments
+		for _, oldVolume := range oldVolumeList {
+			oldVolumeData := oldVolume.(map[string]interface{})
+			found := false
+			for _, newVolume := range newVolumeList {
+				newVolumeData := newVolume.(map[string]interface{})
+				if oldVolumeData["id"] == newVolumeData["id"] {
+					found = true
+					break
+				}
+			}
+			if !found {
+				// This volume is no longer in the config, so detach it
+				volumeID := oldVolumeData["id"].(string)
+				_, err := apiClient.DetachVolume(volumeID)
+				if err != nil {
+					return diag.Errorf("Error detaching volume %s: %s", volumeID, err)
+				}
+				log.Printf("[INFO] Successfully detached volume %s from instance %s", volumeID, d.Id())
+			}
+		}
+	}
+
 	// If reserved_ipv4 has changed, update the instance with the new reserved IP
 	if d.HasChange("reserved_ipv4") {
 		oldReservedIP, newReservedIP := d.GetChange("reserved_ipv4")
diff --git a/civo/volume/resource_volume.go b/civo/volume/resource_volume.go
index 6f8d6a81..f8600a96 100644
--- a/civo/volume/resource_volume.go
+++ b/civo/volume/resource_volume.go
@@ -49,6 +49,7 @@ func ResourceVolume() *schema.Resource {
 			"volume_type": {
 				Type:        schema.TypeString,
 				Optional:    true,
+				Default:     "ms-xfs-2-replicas",
 				Description: "The type of the volume",
 			},
 		},
@@ -91,7 +92,7 @@ func resourceVolumeCreate(ctx context.Context, d *schema.ResourceData, m interfa

 	volume, err := apiClient.NewVolume(config)
 	if err != nil {
-		return diag.Errorf("[ERR] failed to create a new volume: %s", err)
+		return diag.Errorf("[ERR] failed to create a new volume: %s region: %s", err, apiClient.Region)
 	}

 	d.SetId(volume.ID)
diff --git a/civo/volume/resource_volume_attachment.go b/civo/volume/resource_volume_attachment.go
index 1c1f86ef..5647c596 100644
--- a/civo/volume/resource_volume_attachment.go
+++ b/civo/volume/resource_volume_attachment.go
@@ -33,6 +33,12 @@ func ResourceVolumeAttachment() *schema.Resource {
 				ValidateFunc: validation.NoZeroValues,
 				Description:  "The ID of target volume for attachment",
 			},
+			"attach_at_boot": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				ForceNew:    true,
+				Description: "Whether the volume is attached at boot time",
+			},
 			"region": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -85,7 +91,7 @@ func resourceVolumeAttachmentCreate(ctx context.Context, d *schema.ResourceData,
 	}

 	log.Printf("[INFO] attaching the volume %s to instance %s", volumeID, instanceID)
-	_, err := apiClient.AttachVolume(volumeID, vuc)
+	_, err = apiClient.AttachVolume(volumeID, vuc)
 	if err != nil {
 		return diag.Errorf("[ERR] error attaching volume to instance %s", err)
 	}
@@ -108,12 +114,22 @@ func resourceVolumeAttachmentCreate(ctx context.Context, d *schema.ResourceData,
 		MinTimeout:     3 * time.Second,
 		NotFoundChecks: 10,
 	}
-	_, err = createStateConf.WaitForStateContext(context.Background())
+	if attachAtBoot {
+		createStateConf.Pending = []string{"available", "attaching"}
+		createStateConf.Target = []string{"attaching"}
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), createStateConf.Timeout)
+	defer cancel()
+
+	_, err = createStateConf.WaitForStateContext(ctx)
 	if err != nil {
 		return diag.Errorf("error waiting for volume (%s) to be attached: %s", d.Id(), err)
 	}

-	return resourceVolumeAttachmentRead(ctx, d, m)
+	ret := resourceVolumeAttachmentRead(ctx, d, m)
+	diags = append(diags, ret...)
+	return diags
 }

 // function to read the volume
@@ -150,6 +166,7 @@ func resourceVolumeAttachmentRead(_ context.Context, d *schema.ResourceData, m i

 // function to delete the volume
 func resourceVolumeAttachmentDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	apiClient := m.(*civogo.Client)
+	var diags diag.Diagnostics

 	// overwrite the region if it's defined
@@ -157,11 +174,22 @@ func resourceVolumeAttachmentDelete(_ context.Context, d *schema.ResourceData, m
 	}

 	volumeID := d.Get("volume_id").(string)
+	attachAtBoot := d.Get("attach_at_boot").(bool)
+	instanceID := d.Get("instance_id").(string)
+
+	if attachAtBoot {
+		// Notify the user with a warning diagnostic
+		msg := fmt.Sprintf("To use the volume %s, the instance %s needs to be rebooted", volumeID, instanceID)
+		diags = append(diags, diag.Diagnostic{
+			Severity: diag.Warning,
+			Summary:  msg,
+		})
+	}

 	log.Printf("[INFO] Detaching the volume %s", d.Id())
 	_, err := apiClient.DetachVolume(volumeID)
 	if err != nil {
 		return diag.Errorf("[ERR] an error occurred while trying to detach the volume %s", err)
 	}
-	return nil
+	return diags
 }
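For review context, a rough usage sketch of the behaviour this diff adds. The `attached_volume` block, the `attach_at_boot` flag, and the `volume_type` default come straight from the schema changes above; the resource type names (`civo_instance`, `civo_volume`, `civo_volume_attachment`) and the `civo_volume` arguments are assumptions inferred from the file names, not taken from the provider docs.

```hcl
# Sketch only; resource names and civo_volume arguments are assumed, not verified.

resource "civo_volume" "data" {
  name    = "example-data"
  size_gb = 10
  # volume_type is omitted, so it now defaults to "ms-xfs-2-replicas".
}

resource "civo_volume" "extra" {
  name    = "example-extra"
  size_gb = 20
}

# New attached_volume block: the volume is attached when the instance is created.
# Per resourceInstanceUpdate above, adding more attached_volume blocks later is
# rejected; additional volumes must go through civo_volume_attachment instead,
# and removing a block detaches the volume.
resource "civo_instance" "example" {
  hostname = "example-instance"

  attached_volume {
    id = civo_volume.data.id
  }
}

# Attaching a volume after creation; attach_at_boot = true switches the wait
# target to "attaching" and emits a reboot warning when the attachment is destroyed.
resource "civo_volume_attachment" "extra" {
  instance_id    = civo_instance.example.id
  volume_id      = civo_volume.extra.id
  attach_at_boot = true
}
```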