diff --git a/vsphere/helper_test.go b/vsphere/helper_test.go index d6b2ba7b1..543364d37 100644 --- a/vsphere/helper_test.go +++ b/vsphere/helper_test.go @@ -1022,3 +1022,31 @@ func testGetComputeClusterVMAntiAffinityRule(s *terraform.State, resourceName st return resourceVSphereComputeClusterVMAntiAffinityRuleFindEntry(cluster, name) } + +// testGetDatastoreClusterVMAntiAffinityRule is a convenience method to fetch a +// VM anti-affinity rule from a datastore cluster. +func testGetDatastoreClusterVMAntiAffinityRule(s *terraform.State, resourceName string) (*types.ClusterAntiAffinityRuleSpec, error) { + vars, err := testClientVariablesForResource( + s, + fmt.Sprintf("%s.%s", resourceVSphereDatastoreClusterVMAntiAffinityRuleName, resourceName), + ) + if err != nil { + return nil, err + } + + if vars.resourceID == "" { + return nil, errors.New("resource ID is empty") + } + + podID, key, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleParseID(vars.resourceID) + if err != nil { + return nil, err + } + + pod, err := storagepod.FromID(vars.client, podID) + if err != nil { + return nil, err + } + + return resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntry(pod, key) +} diff --git a/vsphere/internal/helper/storagepod/storage_pod_helper.go b/vsphere/internal/helper/storagepod/storage_pod_helper.go index 5e8483c83..332e3adc7 100644 --- a/vsphere/internal/helper/storagepod/storage_pod_helper.go +++ b/vsphere/internal/helper/storagepod/storage_pod_helper.go @@ -82,7 +82,7 @@ func Create(f *object.Folder, name string) (*object.StoragePod, error) { // ApplyDRSConfiguration takes a types.StorageDrsConfigSpec and applies it // against the specified StoragePod. func ApplyDRSConfiguration(client *govmomi.Client, pod *object.StoragePod, spec types.StorageDrsConfigSpec) error { - log.Printf("[DEBUG] Applying storage DRS configuration against datastore clsuter %q", pod.InventoryPath) + log.Printf("[DEBUG] Applying storage DRS configuration against datastore cluster %q", pod.InventoryPath) mgr := object.NewStorageResourceManager(client.Client) ctx, cancel := context.WithTimeout(context.Background(), provider.DefaultAPITimeout) defer cancel() diff --git a/vsphere/provider.go b/vsphere/provider.go index 08dcbeb69..c0a337ded 100644 --- a/vsphere/provider.go +++ b/vsphere/provider.go @@ -88,34 +88,35 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "vsphere_compute_cluster": resourceVSphereComputeCluster(), - "vsphere_compute_cluster_host_group": resourceVSphereComputeClusterHostGroup(), - "vsphere_compute_cluster_vm_affinity_rule": resourceVSphereComputeClusterVMAffinityRule(), - "vsphere_compute_cluster_vm_anti_affinity_rule": resourceVSphereComputeClusterVMAntiAffinityRule(), - "vsphere_compute_cluster_vm_dependency_rule": resourceVSphereComputeClusterVMDependencyRule(), - "vsphere_compute_cluster_vm_group": resourceVSphereComputeClusterVMGroup(), - "vsphere_compute_cluster_vm_host_rule": resourceVSphereComputeClusterVMHostRule(), - "vsphere_custom_attribute": resourceVSphereCustomAttribute(), - "vsphere_datacenter": resourceVSphereDatacenter(), - "vsphere_datastore_cluster": resourceVSphereDatastoreCluster(), - "vsphere_distributed_port_group": resourceVSphereDistributedPortGroup(), - "vsphere_distributed_virtual_switch": resourceVSphereDistributedVirtualSwitch(), - "vsphere_drs_vm_override": resourceVSphereDRSVMOverride(), - "vsphere_dpm_host_override": resourceVSphereDPMHostOverride(), - "vsphere_file": resourceVSphereFile(), - "vsphere_folder": 
resourceVSphereFolder(), - "vsphere_ha_vm_override": resourceVSphereHAVMOverride(), - "vsphere_host_port_group": resourceVSphereHostPortGroup(), - "vsphere_host_virtual_switch": resourceVSphereHostVirtualSwitch(), - "vsphere_license": resourceVSphereLicense(), - "vsphere_tag": resourceVSphereTag(), - "vsphere_tag_category": resourceVSphereTagCategory(), - "vsphere_virtual_disk": resourceVSphereVirtualDisk(), - "vsphere_virtual_machine": resourceVSphereVirtualMachine(), - "vsphere_nas_datastore": resourceVSphereNasDatastore(), - "vsphere_storage_drs_vm_override": resourceVSphereStorageDrsVMOverride(), - "vsphere_vmfs_datastore": resourceVSphereVmfsDatastore(), - "vsphere_virtual_machine_snapshot": resourceVSphereVirtualMachineSnapshot(), + "vsphere_compute_cluster": resourceVSphereComputeCluster(), + "vsphere_compute_cluster_host_group": resourceVSphereComputeClusterHostGroup(), + "vsphere_compute_cluster_vm_affinity_rule": resourceVSphereComputeClusterVMAffinityRule(), + "vsphere_compute_cluster_vm_anti_affinity_rule": resourceVSphereComputeClusterVMAntiAffinityRule(), + "vsphere_compute_cluster_vm_dependency_rule": resourceVSphereComputeClusterVMDependencyRule(), + "vsphere_compute_cluster_vm_group": resourceVSphereComputeClusterVMGroup(), + "vsphere_compute_cluster_vm_host_rule": resourceVSphereComputeClusterVMHostRule(), + "vsphere_custom_attribute": resourceVSphereCustomAttribute(), + "vsphere_datacenter": resourceVSphereDatacenter(), + "vsphere_datastore_cluster": resourceVSphereDatastoreCluster(), + "vsphere_datastore_cluster_vm_anti_affinity_rule": resourceVSphereDatastoreClusterVMAntiAffinityRule(), + "vsphere_distributed_port_group": resourceVSphereDistributedPortGroup(), + "vsphere_distributed_virtual_switch": resourceVSphereDistributedVirtualSwitch(), + "vsphere_drs_vm_override": resourceVSphereDRSVMOverride(), + "vsphere_dpm_host_override": resourceVSphereDPMHostOverride(), + "vsphere_file": resourceVSphereFile(), + "vsphere_folder": resourceVSphereFolder(), + "vsphere_ha_vm_override": resourceVSphereHAVMOverride(), + "vsphere_host_port_group": resourceVSphereHostPortGroup(), + "vsphere_host_virtual_switch": resourceVSphereHostVirtualSwitch(), + "vsphere_license": resourceVSphereLicense(), + "vsphere_tag": resourceVSphereTag(), + "vsphere_tag_category": resourceVSphereTagCategory(), + "vsphere_virtual_disk": resourceVSphereVirtualDisk(), + "vsphere_virtual_machine": resourceVSphereVirtualMachine(), + "vsphere_nas_datastore": resourceVSphereNasDatastore(), + "vsphere_storage_drs_vm_override": resourceVSphereStorageDrsVMOverride(), + "vsphere_vmfs_datastore": resourceVSphereVmfsDatastore(), + "vsphere_virtual_machine_snapshot": resourceVSphereVirtualMachineSnapshot(), }, DataSourcesMap: map[string]*schema.Resource{ diff --git a/vsphere/resource_vsphere_datastore_cluster_vm_anti_affinity_rule.go b/vsphere/resource_vsphere_datastore_cluster_vm_anti_affinity_rule.go new file mode 100644 index 000000000..376688987 --- /dev/null +++ b/vsphere/resource_vsphere_datastore_cluster_vm_anti_affinity_rule.go @@ -0,0 +1,449 @@ +package vsphere + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/storagepod" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/structure" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/viapi" + "github.com/vmware/govmomi" + 
"github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" +) + +const resourceVSphereDatastoreClusterVMAntiAffinityRuleName = "vsphere_datastore_cluster_vm_anti_affinity_rule" + +func resourceVSphereDatastoreClusterVMAntiAffinityRule() *schema.Resource { + return &schema.Resource{ + Create: resourceVSphereDatastoreClusterVMAntiAffinityRuleCreate, + Read: resourceVSphereDatastoreClusterVMAntiAffinityRuleRead, + Update: resourceVSphereDatastoreClusterVMAntiAffinityRuleUpdate, + Delete: resourceVSphereDatastoreClusterVMAntiAffinityRuleDelete, + Importer: &schema.ResourceImporter{ + State: resourceVSphereDatastoreClusterVMAntiAffinityRuleImport, + }, + + Schema: map[string]*schema.Schema{ + "datastore_cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The managed object ID of the datastore cluster.", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "The unique name of the virtual machine group in the cluster.", + }, + "virtual_machine_ids": { + Type: schema.TypeSet, + Required: true, + Description: "The UUIDs of the virtual machines to run on different datastores from each other.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Enable this rule in the cluster.", + }, + "mandatory": { + Type: schema.TypeBool, + Optional: true, + Description: "When true, prevents any virtual machine operations that may violate this rule.", + }, + }, + } +} + +func resourceVSphereDatastoreClusterVMAntiAffinityRuleCreate(d *schema.ResourceData, meta interface{}) error { + if err := resourceVSphereDatastoreClusterVMAntiAffinityRuleValidateRuleVMCount(d); err != nil { + return err + } + + log.Printf("[DEBUG] %s: Beginning create", resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d)) + + pod, _, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleObjects(d, meta) + if err != nil { + return err + } + + info, err := expandClusterAntiAffinityRuleSpec(d, meta) + if err != nil { + return err + } + spec := types.StorageDrsConfigSpec{ + PodConfigSpec: &types.StorageDrsPodConfigSpec{ + Rule: []types.ClusterRuleSpec{ + { + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationAdd, + }, + Info: info, + }, + }, + }, + } + + if err = resourceVSphereDatastoreClusterVMAntiAffinityRuleApplySDRSConfigSpec(pod, spec); err != nil { + return err + } + + info, err = resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntryByName(pod, info.Name) + if err != nil { + return err + } + + id, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleFlattenID(pod, info.Key) + if err != nil { + return fmt.Errorf("cannot compute ID of created resource: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] %s: Create finished successfully", resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d)) + return resourceVSphereDatastoreClusterVMAntiAffinityRuleRead(d, meta) +} + +func resourceVSphereDatastoreClusterVMAntiAffinityRuleRead(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] %s: Beginning read", resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d)) + + pod, key, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleObjects(d, meta) + if err != nil { + return err + } + + info, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntry(pod, key) + if err != nil { + return err + } + + if info == nil { + // The configuration is missing, blank out the ID so it can be re-created. 
+ d.SetId("") + return nil + } + + // Save the datastore_cluster_id. This is ForceNew, but we set these for + // completeness on import so that if the wrong pod/VM combo was used, it + // will be noted. + if err = d.Set("datastore_cluster_id", pod.Reference().Value); err != nil { + return fmt.Errorf("error setting attribute \"datastore_cluster_id\": %s", err) + } + + if err = flattenClusterAntiAffinityRuleSpec(d, meta, info); err != nil { + return err + } + + log.Printf("[DEBUG] %s: Read completed successfully", resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d)) + return nil +} + +func resourceVSphereDatastoreClusterVMAntiAffinityRuleUpdate(d *schema.ResourceData, meta interface{}) error { + if err := resourceVSphereDatastoreClusterVMAntiAffinityRuleValidateRuleVMCount(d); err != nil { + return err + } + + log.Printf("[DEBUG] %s: Beginning update", resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d)) + + pod, key, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleObjects(d, meta) + if err != nil { + return err + } + + info, err := expandClusterAntiAffinityRuleSpec(d, meta) + if err != nil { + return err + } + info.Key = key + + spec := types.StorageDrsConfigSpec{ + PodConfigSpec: &types.StorageDrsPodConfigSpec{ + Rule: []types.ClusterRuleSpec{ + { + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationEdit, + }, + Info: info, + }, + }, + }, + } + + if err := resourceVSphereDatastoreClusterVMAntiAffinityRuleApplySDRSConfigSpec(pod, spec); err != nil { + return err + } + + log.Printf("[DEBUG] %s: Update finished successfully", resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d)) + return resourceVSphereDatastoreClusterVMAntiAffinityRuleRead(d, meta) +} + +func resourceVSphereDatastoreClusterVMAntiAffinityRuleDelete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] %s: Beginning delete", resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d)) + + pod, key, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleObjects(d, meta) + if err != nil { + return err + } + + spec := types.StorageDrsConfigSpec{ + PodConfigSpec: &types.StorageDrsPodConfigSpec{ + Rule: []types.ClusterRuleSpec{ + { + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationRemove, + RemoveKey: key, + }, + }, + }, + }, + } + + if err := resourceVSphereDatastoreClusterVMAntiAffinityRuleApplySDRSConfigSpec(pod, spec); err != nil { + return err + } + + log.Printf("[DEBUG] %s: Deleted successfully", resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d)) + return nil +} + +func resourceVSphereDatastoreClusterVMAntiAffinityRuleImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + var data map[string]string + if err := json.Unmarshal([]byte(d.Id()), &data); err != nil { + return nil, err + } + podPath, ok := data["datastore_cluster_path"] + if !ok { + return nil, errors.New("missing datastore_cluster_path in input data") + } + name, ok := data["name"] + if !ok { + return nil, errors.New("missing name in input data") + } + + client, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleClient(meta) + if err != nil { + return nil, err + } + + pod, err := storagepod.FromPath(client, podPath, nil) + if err != nil { + return nil, fmt.Errorf("cannot locate datastore cluster %q: %s", podPath, err) + } + + info, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntryByName(pod, name) + if err != nil { + return nil, err + } + + id, err := 
resourceVSphereDatastoreClusterVMAntiAffinityRuleFlattenID(pod, info.Key) + if err != nil { + return nil, fmt.Errorf("cannot compute ID of imported resource: %s", err) + } + d.SetId(id) + return []*schema.ResourceData{d}, nil +} + +// resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString prints a friendly string for the +// vsphere_datastore_cluster_vm_anti_affinity_rule resource. +func resourceVSphereDatastoreClusterVMAntiAffinityRuleIDString(d structure.ResourceIDStringer) string { + return structure.ResourceIDString(d, resourceVSphereDatastoreClusterVMAntiAffinityRuleName) +} + +// resourceVSphereDatastoreClusterVMAntiAffinityRuleFlattenID makes an ID for the +// vsphere_datastore_cluster_vm_anti_affinity_rule resource. +func resourceVSphereDatastoreClusterVMAntiAffinityRuleFlattenID(pod *object.StoragePod, key int32) (string, error) { + podID := pod.Reference().Value + return strings.Join([]string{podID, strconv.Itoa(int(key))}, ":"), nil +} + +// resourceVSphereDatastoreClusterVMAntiAffinityRuleParseID parses an ID for the +// vsphere_datastore_cluster_vm_anti_affinity_rule and outputs its parts. +func resourceVSphereDatastoreClusterVMAntiAffinityRuleParseID(id string) (string, int32, error) { + parts := strings.SplitN(id, ":", 3) + if len(parts) < 2 { + return "", 0, fmt.Errorf("bad ID %q", id) + } + + key, err := strconv.Atoi(parts[1]) + if err != nil { + return "", 0, fmt.Errorf("bad key in ID %q: %s", parts[1], err) + } + + return parts[0], int32(key), nil +} + +// resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntry attempts to +// locate an existing VM anti-affinity rule in a datastore cluster's +// configuration by key. It's used by the resource's read functionality and +// tests. nil is returned if the entry cannot be found. +func resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntry( + pod *object.StoragePod, + key int32, +) (*types.ClusterAntiAffinityRuleSpec, error) { + props, err := storagepod.Properties(pod) + if err != nil { + return nil, fmt.Errorf("error fetching datastore cluster properties: %s", err) + } + + for _, info := range props.PodStorageDrsEntry.StorageDrsConfig.PodConfig.Rule { + if info.GetClusterRuleInfo().Key == key { + if vmAntiAffinityRuleInfo, ok := info.(*types.ClusterAntiAffinityRuleSpec); ok { + log.Printf("[DEBUG] Found VM anti-affinity rule key %d in datastore cluster %q", key, pod.Name()) + return vmAntiAffinityRuleInfo, nil + } + return nil, fmt.Errorf("rule key %d in datastore cluster %q is not a VM anti-affinity rule", key, pod.Name()) + } + } + + log.Printf("[DEBUG] No VM anti-affinity rule key %d found in datastore cluster %q", key, pod.Name()) + return nil, nil +} + +// resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntryByName attempts to +// locate an existing VM anti-affinity rule in a datastore cluster's +// configuration by name. It differs from the standard +// resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntry in that we don't +// allow missing entries, as it's designed to be used in places where we don't +// want to allow for missing entries, such as during creation and import. 
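+// In those cases the rule key is not yet known, so the rule can only be
+// located by its name.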
+func resourceVSphereDatastoreClusterVMAntiAffinityRuleFindEntryByName(
+	pod *object.StoragePod,
+	name string,
+) (*types.ClusterAntiAffinityRuleSpec, error) {
+	props, err := storagepod.Properties(pod)
+	if err != nil {
+		return nil, fmt.Errorf("error fetching datastore cluster properties: %s", err)
+	}
+
+	for _, info := range props.PodStorageDrsEntry.StorageDrsConfig.PodConfig.Rule {
+		if info.GetClusterRuleInfo().Name == name {
+			if vmAntiAffinityRuleInfo, ok := info.(*types.ClusterAntiAffinityRuleSpec); ok {
+				log.Printf("[DEBUG] Found VM anti-affinity rule %q in datastore cluster %q", name, pod.Name())
+				return vmAntiAffinityRuleInfo, nil
+			}
+			return nil, fmt.Errorf("rule %q in datastore cluster %q is not a VM anti-affinity rule", name, pod.Name())
+		}
+	}
+
+	return nil, fmt.Errorf("no VM anti-affinity rule %q found in datastore cluster %q", name, pod.Name())
+}
+
+// resourceVSphereDatastoreClusterVMAntiAffinityRuleObjects handles the
+// fetching of the cluster and rule key depending on what attributes are
+// available:
+// * If the resource ID is available, the data is derived from the ID.
+// * If not, only the cluster is retrieved from datastore_cluster_id. -1 is
+//   returned for the key.
+func resourceVSphereDatastoreClusterVMAntiAffinityRuleObjects(
+	d *schema.ResourceData,
+	meta interface{},
+) (*object.StoragePod, int32, error) {
+	if d.Id() != "" {
+		return resourceVSphereDatastoreClusterVMAntiAffinityRuleObjectsFromID(d, meta)
+	}
+	return resourceVSphereDatastoreClusterVMAntiAffinityRuleObjectsFromAttributes(d, meta)
+}
+
+func resourceVSphereDatastoreClusterVMAntiAffinityRuleObjectsFromAttributes(
+	d *schema.ResourceData,
+	meta interface{},
+) (*object.StoragePod, int32, error) {
+	return resourceVSphereDatastoreClusterVMAntiAffinityRuleFetchObjects(
+		meta,
+		d.Get("datastore_cluster_id").(string),
+		-1,
+	)
+}
+
+func resourceVSphereDatastoreClusterVMAntiAffinityRuleObjectsFromID(
+	d structure.ResourceIDStringer,
+	meta interface{},
+) (*object.StoragePod, int32, error) {
+	// Note that this function uses structure.ResourceIDStringer to satisfy
+	// interfacer. Adding exceptions in the comments does not seem to work.
+	// Change this back to ResourceData if it's needed in the future.
+	podID, key, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleParseID(d.Id())
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return resourceVSphereDatastoreClusterVMAntiAffinityRuleFetchObjects(meta, podID, key)
+}
+
+// resourceVSphereDatastoreClusterVMAntiAffinityRuleFetchObjects fetches the
+// "objects" for a cluster rule. This is currently just the cluster object, as
+// the rule key is a static value and a pass-through - this is to keep its
+// workflow consistent with other cluster-dependent resources that derive from
+// ArrayUpdateSpec and have managed objects as keys, such as VM and host
+// overrides.
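+// During create no rule key has been assigned yet, so -1 is passed through as
+// a placeholder; the real key is read back from the cluster configuration
+// after the rule is created.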
+func resourceVSphereDatastoreClusterVMAntiAffinityRuleFetchObjects(
+	meta interface{},
+	podID string,
+	key int32,
+) (*object.StoragePod, int32, error) {
+	client, err := resourceVSphereDatastoreClusterVMAntiAffinityRuleClient(meta)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	pod, err := storagepod.FromID(client, podID)
+	if err != nil {
+		return nil, 0, fmt.Errorf("cannot locate datastore cluster: %s", err)
+	}
+
+	return pod, key, nil
+}
+
+func resourceVSphereDatastoreClusterVMAntiAffinityRuleClient(meta interface{}) (*govmomi.Client, error) {
+	client := meta.(*VSphereClient).vimClient
+	if err := viapi.ValidateVirtualCenter(client); err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+// resourceVSphereDatastoreClusterVMAntiAffinityRuleApplySDRSConfigSpec
+// applies an SDRS config spec for the
+// vsphere_datastore_cluster_vm_anti_affinity_rule resource.
+//
+// This is wrapped to abstract the fact that we are deriving the client from
+// the StoragePod. This is because helper workflows that have been created more
+// recently (i.e. cluster helpers) do this, and more than likely the storagepod
+// helper will do it eventually as well. If there is ever an issue with this,
+// it can be changed here. There should be no issue though, as govmomi.Client
+// is mainly just vim25.Client with some additional session helper bits that
+// are not needed during normal operation.
+func resourceVSphereDatastoreClusterVMAntiAffinityRuleApplySDRSConfigSpec(
+	pod *object.StoragePod,
+	spec types.StorageDrsConfigSpec,
+) error {
+	return storagepod.ApplyDRSConfiguration(
+		&govmomi.Client{
+			Client: pod.Client(),
+		},
+		pod,
+		spec,
+	)
+}
+
+// resourceVSphereDatastoreClusterVMAntiAffinityRuleValidateRuleVMCount ensures
+// that the VM count in the anti-affinity rule is at least 2 before it's
+// created or updated.
+//
+// This validation is necessary as a rule with only 1 VM is a no-op and
+// ultimately will result in a broken resource (the rule will not exist after
+// creation, for example). Unfortunately, this needs to happen at apply time
+// right now due to issues with TF core and how it processes lists when values
+// are computed. Once these issues are fixed with TF core, the validation here
+// should be removed and moved to schema.
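+// The check runs at the start of both Create and Update so that an invalid
+// rule is rejected before any changes are pushed to the datastore cluster.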
+func resourceVSphereDatastoreClusterVMAntiAffinityRuleValidateRuleVMCount(d *schema.ResourceData) error { + if d.Get("virtual_machine_ids").(*schema.Set).Len() < 2 { + return errors.New("length of virtual_machine_ids must be 2 or more") + } + return nil +} diff --git a/vsphere/resource_vsphere_datastore_cluster_vm_anti_affinity_rule_test.go b/vsphere/resource_vsphere_datastore_cluster_vm_anti_affinity_rule_test.go new file mode 100644 index 000000000..d5aeee95e --- /dev/null +++ b/vsphere/resource_vsphere_datastore_cluster_vm_anti_affinity_rule_test.go @@ -0,0 +1,447 @@ +package vsphere + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "reflect" + "sort" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/structure" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/viapi" + "github.com/terraform-providers/terraform-provider-vsphere/vsphere/internal/helper/virtualmachine" + "github.com/vmware/govmomi/vim25/types" +) + +func TestAccResourceVSphereDatastoreClusterVMAntiAffinityRule_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccResourceVSphereDatastoreClusterVMAntiAffinityRulePreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleConfig(2, true), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(true), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchBase( + true, + false, + "terraform-test-datastore-cluster-anti-affinity-rule", + ), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembership(), + ), + }, + }, + }) +} + +func TestAccResourceVSphereDatastoreClusterVMAntiAffinityRule_updateEnabled(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccResourceVSphereDatastoreClusterVMAntiAffinityRulePreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleConfig(2, true), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(true), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchBase( + true, + false, + "terraform-test-datastore-cluster-anti-affinity-rule", + ), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembership(), + ), + }, + { + Config: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleConfig(2, false), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(true), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchBase( + false, + false, + "terraform-test-datastore-cluster-anti-affinity-rule", + ), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembership(), + ), + }, + }, + }) +} + +func TestAccResourceVSphereDatastoreClusterVMAntiAffinityRule_updateCount(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccResourceVSphereDatastoreClusterVMAntiAffinityRulePreCheck(t) + }, + Providers: testAccProviders, + 
CheckDestroy: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleConfig(2, true), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(true), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchBase( + true, + false, + "terraform-test-datastore-cluster-anti-affinity-rule", + ), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembership(), + ), + }, + { + Config: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleConfig(3, true), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(true), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchBase( + true, + false, + "terraform-test-datastore-cluster-anti-affinity-rule", + ), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembership(), + ), + }, + }, + }) +} + +func TestAccResourceVSphereDatastoreClusterVMAntiAffinityRule_import(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + testAccResourceVSphereDatastoreClusterVMAntiAffinityRulePreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(false), + Steps: []resource.TestStep{ + { + Config: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleConfig(2, true), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(true), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembership(), + ), + }, + { + ResourceName: "vsphere_datastore_cluster_vm_anti_affinity_rule.cluster_vm_anti_affinity_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: func(s *terraform.State) (string, error) { + pod, err := testGetDatastoreCluster(s, "datastore_cluster") + if err != nil { + return "", err + } + + rs, ok := s.RootModule().Resources["vsphere_datastore_cluster_vm_anti_affinity_rule.cluster_vm_anti_affinity_rule"] + if !ok { + return "", errors.New("no resource at address vsphere_datastore_cluster_vm_anti_affinity_rule.cluster_vm_anti_affinity_rule") + } + name, ok := rs.Primary.Attributes["name"] + if !ok { + return "", errors.New("vsphere_datastore_cluster_vm_anti_affinity_rule.cluster_vm_anti_affinity_rule has no name attribute") + } + + m := make(map[string]string) + m["datastore_cluster_path"] = pod.InventoryPath + m["name"] = name + b, err := json.Marshal(m) + if err != nil { + return "", err + } + + return string(b), nil + }, + Config: testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleConfig(2, true), + Check: resource.ComposeTestCheckFunc( + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(true), + testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembership(), + ), + }, + }, + }) +} + +func testAccResourceVSphereDatastoreClusterVMAntiAffinityRulePreCheck(t *testing.T) { + if os.Getenv("VSPHERE_DATACENTER") == "" { + t.Skip("set VSPHERE_DATACENTER to run vsphere_datastore_cluster_vm_anti_affinity_rule acceptance tests") + } + if os.Getenv("VSPHERE_NAS_HOST") == "" { + t.Skip("set VSPHERE_NAS_HOST to run vsphere_datastore_cluster_vm_anti_affinity_rule acceptance tests") + } + if os.Getenv("VSPHERE_NFS_PATH") == "" { + t.Skip("set VSPHERE_NFS_PATH to run vsphere_datastore_cluster_vm_anti_affinity_rule acceptance tests") + } + if os.Getenv("VSPHERE_ESXI_HOST") == "" { + t.Skip("set 
VSPHERE_ESXI_HOST to run vsphere_datastore_cluster_vm_anti_affinity_rule acceptance tests") + } + if os.Getenv("VSPHERE_ESXI_HOST2") == "" { + t.Skip("set VSPHERE_ESXI_HOST2 to run vsphere_datastore_cluster_vm_anti_affinity_rule acceptance tests") + } + if os.Getenv("VSPHERE_ESXI_HOST3") == "" { + t.Skip("set VSPHERE_ESXI_HOST3 to run vsphere_datastore_cluster_vm_anti_affinity_rule acceptance tests") + } + if os.Getenv("VSPHERE_CLUSTER") == "" { + t.Skip("set VSPHERE_CLUSTER to run vsphere_datastore_cluster_vm_anti_affinity_rule acceptance tests") + } + if os.Getenv("VSPHERE_NETWORK_LABEL_PXE") == "" { + t.Skip("set VSPHERE_NETWORK_LABEL_PXE to run vsphere_datastore_cluster_vm_anti_affinity_rule acceptance tests") + } +} + +func testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleExists(expected bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + info, err := testGetDatastoreClusterVMAntiAffinityRule(s, "cluster_vm_anti_affinity_rule") + if err != nil { + if expected == false { + if viapi.IsManagedObjectNotFoundError(err) { + // This is not necessarily a missing rule, but more than likely a + // missing cluster, which happens during destroy as the dependent + // resources will be missing as well, so want to treat this as a + // deleted rule as well. + return nil + } + } + return err + } + + switch { + case info == nil && !expected: + // Expected missing + return nil + case info == nil && expected: + // Expected to exist + return errors.New("cluster rule missing when expected to exist") + case !expected: + return errors.New("cluster rule still present when expected to be missing") + } + + return nil + } +} + +func testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchBase( + enabled bool, + mandatory bool, + name string, +) resource.TestCheckFunc { + return func(s *terraform.State) error { + actual, err := testGetDatastoreClusterVMAntiAffinityRule(s, "cluster_vm_anti_affinity_rule") + if err != nil { + return err + } + + if actual == nil { + return errors.New("cluster rule missing") + } + + expected := &types.ClusterAntiAffinityRuleSpec{ + ClusterRuleInfo: types.ClusterRuleInfo{ + Enabled: structure.BoolPtr(enabled), + Mandatory: structure.BoolPtr(mandatory), + Name: name, + UserCreated: structure.BoolPtr(true), + InCompliance: actual.InCompliance, + Key: actual.Key, + RuleUuid: actual.RuleUuid, + Status: actual.Status, + }, + Vm: actual.Vm, + } + + if !reflect.DeepEqual(expected, actual) { + return spew.Errorf("expected %#v got %#v", expected, actual) + } + + return nil + } +} + +func testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembership() resource.TestCheckFunc { + return func(s *terraform.State) error { + actual, err := testGetDatastoreClusterVMAntiAffinityRule(s, "cluster_vm_anti_affinity_rule") + if err != nil { + return err + } + + if actual == nil { + return errors.New("cluster rule missing") + } + + vms, err := testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembershipVMIDs(s) + if err != nil { + return err + } + + expectedSort := structure.MoRefSorter(vms) + sort.Sort(expectedSort) + + expected := &types.ClusterAntiAffinityRuleSpec{ + ClusterRuleInfo: actual.ClusterRuleInfo, + Vm: actual.Vm, + } + + actualSort := structure.MoRefSorter(actual.Vm) + sort.Sort(actualSort) + actual.Vm = []types.ManagedObjectReference(actualSort) + + if !reflect.DeepEqual(expected, actual) { + return spew.Errorf("expected %#v got %#v", expected, actual) + } + + return nil + } +} + +func 
testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleMatchMembershipVMIDs(s *terraform.State) ([]types.ManagedObjectReference, error) { + var ids []string + if rs, ok := s.RootModule().Resources["vsphere_virtual_machine.vm"]; ok { + ids = []string{rs.Primary.ID} + } else { + ids = testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleGetMultiple(s) + } + + results, err := virtualmachine.MOIDsForUUIDs(testAccProvider.Meta().(*VSphereClient).vimClient, ids) + if err != nil { + return nil, err + } + return results.ManagedObjectReferences(), nil +} + +func testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleGetMultiple(s *terraform.State) []string { + var i int + var ids []string + for { + rs, ok := s.RootModule().Resources[fmt.Sprintf("vsphere_virtual_machine.vm.%d", i)] + if !ok { + break + } + ids = append(ids, rs.Primary.ID) + i++ + } + return ids +} + +func testAccResourceVSphereDatastoreClusterVMAntiAffinityRuleConfig(count int, enabled bool) string { + return fmt.Sprintf(` +variable "datacenter" { + default = "%s" +} + +variable "nfs_host" { + default = "%s" +} + +variable "nfs_path" { + default = "%s" +} + +variable "esxi_hosts" { + default = [ + "%s", + "%s", + "%s", + ] +} + +variable "cluster" { + default = "%s" +} + +variable "network_label" { + default = "%s" +} + +variable "vm_count" { + default = "%d" +} + +data "vsphere_datacenter" "dc" { + name = "${var.datacenter}" +} + +data "vsphere_host" "esxi_hosts" { + count = "${length(var.esxi_hosts)}" + name = "${var.esxi_hosts[count.index]}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" +} + +data "vsphere_compute_cluster" "cluster" { + name = "${var.cluster}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" +} + +data "vsphere_network" "network" { + name = "${var.network_label}" + datacenter_id = "${data.vsphere_datacenter.dc.id}" +} + +resource "vsphere_datastore_cluster" "datastore_cluster" { + name = "terraform-datastore-cluster-test" + datacenter_id = "${data.vsphere_datacenter.dc.id}" + sdrs_enabled = true +} + +resource "vsphere_nas_datastore" "datastore" { + name = "terraform-test-nas" + host_system_ids = ["${data.vsphere_host.esxi_hosts.*.id}"] + datastore_cluster_id = "${vsphere_datastore_cluster.datastore_cluster.id}" + + type = "NFS" + remote_hosts = ["${var.nfs_host}"] + remote_path = "${var.nfs_path}" +} + +resource "vsphere_virtual_machine" "vm" { + count = "${var.vm_count}" + name = "terraform-test-${count.index}" + resource_pool_id = "${data.vsphere_compute_cluster.cluster.resource_pool_id}" + datastore_cluster_id = "${vsphere_datastore_cluster.datastore_cluster.id}" + + num_cpus = 2 + memory = 2048 + guest_id = "other3xLinux64Guest" + + wait_for_guest_net_timeout = -1 + + network_interface { + network_id = "${data.vsphere_network.network.id}" + } + + disk { + label = "disk0" + size = 20 + } + + depends_on = ["vsphere_nas_datastore.datastore"] +} + +resource "vsphere_datastore_cluster_vm_anti_affinity_rule" "cluster_vm_anti_affinity_rule" { + name = "terraform-test-datastore-cluster-anti-affinity-rule" + datastore_cluster_id = "${vsphere_datastore_cluster.datastore_cluster.id}" + virtual_machine_ids = ["${vsphere_virtual_machine.vm.*.id}"] + enabled = %t +} +`, + os.Getenv("VSPHERE_DATACENTER"), + os.Getenv("VSPHERE_NAS_HOST"), + os.Getenv("VSPHERE_NFS_PATH"), + os.Getenv("VSPHERE_ESXI_HOST"), + os.Getenv("VSPHERE_ESXI_HOST2"), + os.Getenv("VSPHERE_ESXI_HOST3"), + os.Getenv("VSPHERE_CLUSTER"), + os.Getenv("VSPHERE_NETWORK_LABEL_PXE"), + count, + enabled, + ) +} diff --git 
a/website/docs/r/datastore_cluster_vm_anti_affinity_rule.html.markdown b/website/docs/r/datastore_cluster_vm_anti_affinity_rule.html.markdown
new file mode 100644
index 000000000..8ba36f938
--- /dev/null
+++ b/website/docs/r/datastore_cluster_vm_anti_affinity_rule.html.markdown
@@ -0,0 +1,130 @@
+---
+layout: "vsphere"
+page_title: "VMware vSphere: vsphere_datastore_cluster_vm_anti_affinity_rule"
+sidebar_current: "docs-vsphere-resource-storage-cluster-datastore-vm-anti-affinity-rule"
+description: |-
+  Provides a VMware vSphere datastore cluster virtual machine anti-affinity rule. This can be used to manage rules to tell virtual machines to run on separate datastores.
+---
+
+# vsphere\_datastore\_cluster\_vm\_anti\_affinity\_rule
+
+The `vsphere_datastore_cluster_vm_anti_affinity_rule` resource can be used to
+manage VM anti-affinity rules in a datastore cluster, either created by the
+[`vsphere_datastore_cluster`][tf-vsphere-datastore-cluster-resource] resource or looked up
+by the [`vsphere_datastore_cluster`][tf-vsphere-datastore-cluster-data-source] data source.
+
+[tf-vsphere-datastore-cluster-resource]: /docs/providers/vsphere/r/datastore_cluster.html
+[tf-vsphere-datastore-cluster-data-source]: /docs/providers/vsphere/d/datastore_cluster.html
+
+This rule can be used to tell a set of virtual machines to run on different
+datastores within a cluster, useful for preventing single points of failure in
+application cluster scenarios. When configured, Storage DRS will make a best
+effort to ensure that the virtual machines run on different datastores, or
+prevent any operation that would violate the rule, depending on the value of
+the [`mandatory`](#mandatory) flag.
+
+~> **NOTE:** This resource requires vCenter and is not available on direct ESXi
+connections.
+
+~> **NOTE:** Storage DRS requires a vSphere Enterprise Plus license.
+
+## Example Usage
+
+The example below creates two virtual machines in a cluster using the
+[`vsphere_virtual_machine`][tf-vsphere-vm-resource] resource, creating the
+virtual machines in the datastore cluster looked up by the
+[`vsphere_datastore_cluster`][tf-vsphere-datastore-cluster-data-source] data
+source. It then creates an anti-affinity rule for these two virtual machines,
+ensuring they will run on different datastores whenever possible.
+
+[tf-vsphere-vm-resource]: /docs/providers/vsphere/r/virtual_machine.html
+
+```hcl
+data "vsphere_datacenter" "dc" {
+  name = "dc1"
+}
+
+data "vsphere_datastore_cluster" "datastore_cluster" {
+  name          = "datastore-cluster1"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+data "vsphere_compute_cluster" "cluster" {
+  name          = "cluster1"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+data "vsphere_network" "network" {
+  name          = "network1"
+  datacenter_id = "${data.vsphere_datacenter.dc.id}"
+}
+
+resource "vsphere_virtual_machine" "vm" {
+  count                = 2
+  name                 = "terraform-test-${count.index}"
+  resource_pool_id     = "${data.vsphere_compute_cluster.cluster.resource_pool_id}"
+  datastore_cluster_id = "${data.vsphere_datastore_cluster.datastore_cluster.id}"
+
+  num_cpus = 2
+  memory   = 2048
+  guest_id = "other3xLinux64Guest"
+
+  network_interface {
+    network_id = "${data.vsphere_network.network.id}"
+  }
+
+  disk {
+    label = "disk0"
+    size  = 20
+  }
+}
+
+resource "vsphere_datastore_cluster_vm_anti_affinity_rule" "cluster_vm_anti_affinity_rule" {
+  name                 = "terraform-test-datastore-cluster-vm-anti-affinity-rule"
+  datastore_cluster_id = "${data.vsphere_datastore_cluster.datastore_cluster.id}"
+  virtual_machine_ids  = ["${vsphere_virtual_machine.vm.*.id}"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `datastore_cluster_id` - (Required) The [managed object reference
+  ID][docs-about-morefs] of the datastore cluster to put the rule in. Forces a
+  new resource if changed.
+
+[docs-about-morefs]: /docs/providers/vsphere/index.html#use-of-managed-object-references-by-the-vsphere-provider
+
+* `name` - (Required) The name of the rule. This must be unique in the cluster.
+* `virtual_machine_ids` - (Required) The UUIDs of the virtual machines to run
+  on different datastores from each other.
+
+~> **NOTE:** The minimum length of `virtual_machine_ids` is 2, and due to
+current limitations in Terraform Core, the value is currently checked during
+the apply phase, not the validation or plan phases. Ensure proper length of
+this value to prevent failures mid-apply.
+
+* `enabled` - (Optional) Enable this rule in the cluster. Default: `true`.
+* `mandatory` - (Optional) When this value is `true`, prevents any virtual
+  machine operations that may violate this rule. Default: `false`.
+
+## Attribute Reference
+
+The only attribute this resource exports is the `id` of the resource, which is
+a combination of the [managed object reference ID][docs-about-morefs] of the
+datastore cluster and the rule's key within the cluster configuration.
+
+## Importing
+
+An existing rule can be [imported][docs-import] into this resource by supplying
+both the path to the datastore cluster and the name of the rule. If the name or
+cluster is not found, or if the rule is of a different type, an error will be
+returned. An example is below:
+
+[docs-import]: https://www.terraform.io/docs/import/index.html
+```
+terraform import vsphere_datastore_cluster_vm_anti_affinity_rule.cluster_vm_anti_affinity_rule \
+  '{"datastore_cluster_path": "/dc1/datastore/cluster1", \
+  "name": "terraform-test-datastore-cluster-vm-anti-affinity-rule"}'
+```
diff --git a/website/vsphere.erb b/website/vsphere.erb
index 42f4a68e6..0e6567b72 100644
--- a/website/vsphere.erb
+++ b/website/vsphere.erb
@@ -145,6 +145,9 @@
                 >
                   vsphere_datastore_cluster
+                >
+                  vsphere_datastore_cluster_vm_anti_affinity_rule
+
                 >
                   vsphere_file