diff --git a/aws/data_source_aws_ec2_instance_type.go b/aws/data_source_aws_ec2_instance_type.go
new file mode 100644
index 00000000000..ce7fb45330f
--- /dev/null
+++ b/aws/data_source_aws_ec2_instance_type.go
@@ -0,0 +1,439 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+func dataSourceAwsEc2InstanceType() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceAwsEc2InstanceTypeRead,
+
+		Schema: map[string]*schema.Schema{
+			"instance_type": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"current_generation": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"free_tier_eligible": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"supported_usages_classes": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"supported_root_device_types": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"bare_metal": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"hypervisor": {
+				Type:     schema.TypeString,
+				Computed: true,
+				Optional: true,
+			},
+			"supported_architectures": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"sustained_clock_speed": {
+				Type:     schema.TypeFloat,
+				Computed: true,
+			},
+			"default_vcpus": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"default_cores": {
+				Type:     schema.TypeInt,
+				Computed: true,
+				Optional: true,
+			},
+			"default_threads_per_core": {
+				Type:     schema.TypeInt,
+				Computed: true,
+				Optional: true,
+			},
+			"valid_cores": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeInt},
+			},
+			"valid_threads_per_core": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeInt},
+			},
+			"memory_size": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"instance_storage_supported": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"total_instance_storage": {
+				Type:     schema.TypeInt,
+				Computed: true,
+				Optional: true,
+			},
+			"instance_disks": {
+				Type:     schema.TypeSet,
+				Computed: true,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"size": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"count": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"type": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+					},
+				},
+			},
+			"ebs_optimized_support": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"ebs_encryption_support": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"network_performance": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"maximum_network_interfaces": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"maximum_ipv4_addresses_per_interface": {
+				Type:     schema.TypeInt,
+				Computed: true,
+			},
+			"maximum_ipv6_addresses_per_interface": {
+				Type:     schema.TypeInt,
+				Computed: true,
+				Optional: true,
+			},
+			"ipv6_supported": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"ena_support": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"gpus": {
+				Type:     schema.TypeSet,
+				Optional: true,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"manufacturer": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"count": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"memory_size": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+					},
+				},
+			},
+			"total_gpu_memory": {
+				Type:     schema.TypeInt,
+				Computed: true,
+				Optional: true,
+			},
+			"fpgas": {
+				Type:     schema.TypeSet,
+				Computed: true,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"manufacturer": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"count": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"memory_size": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+					},
+				},
+			},
+			"total_fpga_memory": {
+				Type:     schema.TypeInt,
+				Computed: true,
+				Optional: true,
+			},
+			"supported_placement_strategies": {
+				Type:     schema.TypeList,
+				Computed: true,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+			},
+			"accelerators": {
+				Type:     schema.TypeSet,
+				Computed: true,
+				Optional: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"manufacturer": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"count": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+					},
+				},
+			},
+			"hibernation_supported": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"burstable_performance_supported": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"dedicated_hosts_supported": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"auto_recovery_supported": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceAwsEc2InstanceTypeRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).ec2conn
+
+	params := &ec2.DescribeInstanceTypesInput{}
+
+	instanceType := d.Get("instance_type").(string)
+	params.InstanceTypes = []*string{aws.String(instanceType)}
+	log.Printf("[DEBUG] Reading instance types: %s", params)
+
+	resp, err := conn.DescribeInstanceTypes(params)
+	if err != nil {
+		return err
+	}
+	if len(resp.InstanceTypes) == 0 {
+		return fmt.Errorf("no Instance Type found for %s", instanceType)
+	}
+	if len(resp.InstanceTypes) > 1 {
+		return fmt.Errorf("multiple instance types found for type %s", instanceType)
+	}
+	v := resp.InstanceTypes[0]
+	d.Set("instance_type", v.InstanceType)
+	if err := d.Set("current_generation", aws.BoolValue(v.CurrentGeneration)); err != nil {
+		return fmt.Errorf("error setting current_generation: %s", err)
+	}
+	if err := d.Set("free_tier_eligible", aws.BoolValue(v.FreeTierEligible)); err != nil {
+		return fmt.Errorf("error setting free_tier_eligible: %s", err)
+	}
+	if err := d.Set("supported_usages_classes", aws.StringValueSlice(v.SupportedUsageClasses)); err != nil {
+		return fmt.Errorf("error setting supported_usages_classes: %s", err)
+	}
+	if err := d.Set("supported_root_device_types", aws.StringValueSlice(v.SupportedRootDeviceTypes)); err != nil {
+		return fmt.Errorf("error setting supported_root_device_types: %s", err)
+	}
+	if err := d.Set("bare_metal", aws.BoolValue(v.BareMetal)); err != nil {
+		return fmt.Errorf("error setting bare_metal: %s", err)
+	}
+	if v.Hypervisor != nil {
+		if err := d.Set("hypervisor", aws.StringValue(v.Hypervisor)); err != nil {
+			return fmt.Errorf("error setting hypervisor: %s", err)
+		}
+	}
+	if err := d.Set("supported_architectures", aws.StringValueSlice(v.ProcessorInfo.SupportedArchitectures)); err != nil {
+		return fmt.Errorf("error setting supported_architectures: %s", err)
+	}
+	if err := d.Set("sustained_clock_speed", aws.Float64Value(v.ProcessorInfo.SustainedClockSpeedInGhz)); err != nil {
+		return fmt.Errorf("error setting sustained_clock_speed: %s", err)
+	}
+	if err := d.Set("default_vcpus", aws.Int64Value(v.VCpuInfo.DefaultVCpus)); err != nil {
+		return fmt.Errorf("error setting default_vcpus: %s", err)
+	}
+	if v.VCpuInfo.DefaultCores != nil {
+		if err := d.Set("default_cores", aws.Int64Value(v.VCpuInfo.DefaultCores)); err != nil {
+			return fmt.Errorf("error setting default_cores: %s", err)
+		}
+	}
+	if v.VCpuInfo.DefaultThreadsPerCore != nil {
+		if err := d.Set("default_threads_per_core", aws.Int64Value(v.VCpuInfo.DefaultThreadsPerCore)); err != nil {
+			return fmt.Errorf("error setting default_threads_per_core: %s", err)
+		}
+	}
+	if v.VCpuInfo.ValidThreadsPerCore != nil {
+		if err := d.Set("valid_threads_per_core", aws.Int64ValueSlice(v.VCpuInfo.ValidThreadsPerCore)); err != nil {
+			return fmt.Errorf("error setting valid_threads_per_core: %s", err)
+		}
+	}
+	if v.VCpuInfo.ValidCores != nil {
+		if err := d.Set("valid_cores", aws.Int64ValueSlice(v.VCpuInfo.ValidCores)); err != nil {
+			return fmt.Errorf("error setting valid_cores: %s", err)
+		}
+	}
+	if err := d.Set("memory_size", aws.Int64Value(v.MemoryInfo.SizeInMiB)); err != nil {
+		return fmt.Errorf("error setting memory_size: %s", err)
+	}
+	if err := d.Set("instance_storage_supported", aws.BoolValue(v.InstanceStorageSupported)); err != nil {
+		return fmt.Errorf("error setting instance_storage_supported: %s", err)
+	}
+	if v.InstanceStorageInfo != nil {
+		if err := d.Set("total_instance_storage", aws.Int64Value(v.InstanceStorageInfo.TotalSizeInGB)); err != nil {
+			return fmt.Errorf("error setting total_instance_storage: %s", err)
+		}
+		if v.InstanceStorageInfo.Disks != nil {
+			diskList := make([]interface{}, len(v.InstanceStorageInfo.Disks))
+			for i, dk := range v.InstanceStorageInfo.Disks {
+				disk := map[string]interface{}{
+					"size":  aws.Int64Value(dk.SizeInGB),
+					"count": aws.Int64Value(dk.Count),
+					"type":  aws.StringValue(dk.Type),
+				}
+				diskList[i] = disk
+			}
+			if err := d.Set("instance_disks", diskList); err != nil {
+				return fmt.Errorf("error setting instance_disks: %s", err)
+			}
+		}
+	}
+	if err := d.Set("ebs_optimized_support", aws.StringValue(v.EbsInfo.EbsOptimizedSupport)); err != nil {
+		return fmt.Errorf("error setting ebs_optimized_support: %s", err)
+	}
+	if err := d.Set("ebs_encryption_support", aws.StringValue(v.EbsInfo.EncryptionSupport)); err != nil {
+		return fmt.Errorf("error setting ebs_encryption_support: %s", err)
+	}
+	if err := d.Set("network_performance", aws.StringValue(v.NetworkInfo.NetworkPerformance)); err != nil {
+		return fmt.Errorf("error setting network_performance: %s", err)
+	}
+	if err := d.Set("maximum_network_interfaces", aws.Int64Value(v.NetworkInfo.MaximumNetworkInterfaces)); err != nil {
+		return fmt.Errorf("error setting maximum_network_interfaces: %s", err)
+	}
+	if err := d.Set("maximum_ipv4_addresses_per_interface", aws.Int64Value(v.NetworkInfo.Ipv4AddressesPerInterface)); err != nil {
+		return fmt.Errorf("error setting maximum_ipv4_addresses_per_interface: %s", err)
+	}
+	if err := d.Set("maximum_ipv6_addresses_per_interface", aws.Int64Value(v.NetworkInfo.Ipv6AddressesPerInterface)); err != nil {
+		return fmt.Errorf("error setting maximum_ipv6_addresses_per_interface: %s", err)
+	}
+	if err := d.Set("ipv6_supported", aws.BoolValue(v.NetworkInfo.Ipv6Supported)); err != nil {
+		return fmt.Errorf("error setting ipv6_supported: %s", err)
+	}
+	if err := d.Set("ena_support", aws.StringValue(v.NetworkInfo.EnaSupport)); err != nil {
+		return fmt.Errorf("error setting ena_support: %s", err)
+	}
+	if v.GpuInfo != nil {
+		gpuList := make([]interface{}, len(v.GpuInfo.Gpus))
+		for i, gp := range v.GpuInfo.Gpus {
+			gpu := map[string]interface{}{
+				"manufacturer": aws.StringValue(gp.Manufacturer),
+				"name":         aws.StringValue(gp.Name),
+				"count":        aws.Int64Value(gp.Count),
+				"memory_size":  aws.Int64Value(gp.MemoryInfo.SizeInMiB),
+			}
+			gpuList[i] = gpu
+		}
+		if err := d.Set("gpus", gpuList); err != nil {
+			return fmt.Errorf("error setting gpus: %s", err)
+		}
+		if err := d.Set("total_gpu_memory", aws.Int64Value(v.GpuInfo.TotalGpuMemoryInMiB)); err != nil {
+			return fmt.Errorf("error setting total_gpu_memory: %s", err)
+		}
+	}
+	if v.FpgaInfo != nil {
+		fpgaList := make([]interface{}, len(v.FpgaInfo.Fpgas))
+		for i, fpg := range v.FpgaInfo.Fpgas {
+			fpga := map[string]interface{}{
+				"manufacturer": aws.StringValue(fpg.Manufacturer),
+				"name":         aws.StringValue(fpg.Name),
+				"count":        aws.Int64Value(fpg.Count),
+				"memory_size":  aws.Int64Value(fpg.MemoryInfo.SizeInMiB),
+			}
+			fpgaList[i] = fpga
+		}
+		if err := d.Set("fpgas", fpgaList); err != nil {
+			return fmt.Errorf("error setting fpgas: %s", err)
+		}
+		if err := d.Set("total_fpga_memory", aws.Int64Value(v.FpgaInfo.TotalFpgaMemoryInMiB)); err != nil {
+			return fmt.Errorf("error setting total_fpga_memory: %s", err)
+		}
+	}
+	if err := d.Set("supported_placement_strategies", aws.StringValueSlice(v.PlacementGroupInfo.SupportedStrategies)); err != nil {
+		return fmt.Errorf("error setting supported_placement_strategies: %s", err)
+	}
+	if v.InferenceAcceleratorInfo != nil {
+		acceleratorList := make([]interface{}, len(v.InferenceAcceleratorInfo.Accelerators))
+		for i, accl := range v.InferenceAcceleratorInfo.Accelerators {
+			accelerator := map[string]interface{}{
+				"manufacturer": aws.StringValue(accl.Manufacturer),
+				"name":         aws.StringValue(accl.Name),
+				"count":        aws.Int64Value(accl.Count),
+			}
+			acceleratorList[i] = accelerator
+		}
+		if err := d.Set("accelerators", acceleratorList); err != nil {
+			return fmt.Errorf("error setting accelerators: %s", err)
+		}
+	}
+	if err := d.Set("hibernation_supported", aws.BoolValue(v.HibernationSupported)); err != nil {
+		return fmt.Errorf("error setting hibernation_supported: %s", err)
+	}
+	if err := d.Set("burstable_performance_supported", aws.BoolValue(v.BurstablePerformanceSupported)); err != nil {
+		return fmt.Errorf("error setting burstable_performance_supported: %s", err)
+	}
+	if err := d.Set("dedicated_hosts_supported", aws.BoolValue(v.DedicatedHostsSupported)); err != nil {
+		return fmt.Errorf("error setting dedicated_hosts_supported: %s", err)
+	}
+	if err := d.Set("auto_recovery_supported", aws.BoolValue(v.AutoRecoverySupported)); err != nil {
+		return fmt.Errorf("error setting auto_recovery_supported: %s", err)
+	}
+	d.SetId(aws.StringValue(v.InstanceType))
+	return nil
+}
diff --git a/aws/data_source_aws_ec2_instance_type_test.go b/aws/data_source_aws_ec2_instance_type_test.go
new file mode 100644
index 00000000000..4f71326769e
--- /dev/null
+++ b/aws/data_source_aws_ec2_instance_type_test.go
@@ -0,0 +1,101 @@
+package aws
+
+import (
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+func TestAccDataSourceAwsEc2InstanceType_attributes(t *testing.T) {
+	resourceMetal := "data.aws_ec2_instance_type.metal"
+	resourceGpu := "data.aws_ec2_instance_type.gpu"
+	resourceFpga := "data.aws_ec2_instance_type.fpga"
+	resourceAccelerator := "data.aws_ec2_instance_type.accelerator"
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:  func() { testAccPreCheck(t) },
+		Providers: testAccProviders,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccDataSourceEc2InstanceType,
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr(resourceMetal, "auto_recovery_supported", "false"),
+					resource.TestCheckResourceAttr(resourceMetal, "bare_metal", "true"),
+					resource.TestCheckResourceAttr(resourceMetal, "burstable_performance_supported", "false"),
+					resource.TestCheckResourceAttr(resourceMetal, "current_generation", "true"),
+					resource.TestCheckResourceAttr(resourceMetal, "dedicated_hosts_supported", "true"),
+					resource.TestCheckResourceAttr(resourceMetal, "default_vcpus", "96"),
+					resource.TestCheckResourceAttr(resourceMetal, "ebs_encryption_support", "supported"),
+					resource.TestCheckResourceAttr(resourceMetal, "ebs_optimized_support", "default"),
+					resource.TestCheckResourceAttr(resourceMetal, "ena_support", "required"),
+					resource.TestCheckResourceAttr(resourceMetal, "free_tier_eligible", "false"),
+					resource.TestCheckResourceAttr(resourceMetal, "hibernation_supported", "false"),
+					resource.TestCheckResourceAttr(resourceMetal, "instance_storage_supported", "true"),
+					resource.TestCheckResourceAttr(resourceMetal, "instance_type", "i3en.metal"),
+					resource.TestCheckResourceAttr(resourceMetal, "ipv6_supported", "true"),
+					resource.TestCheckResourceAttr(resourceMetal, "maximum_ipv4_addresses_per_interface", "50"),
+					resource.TestCheckResourceAttr(resourceMetal, "maximum_ipv6_addresses_per_interface", "50"),
+					resource.TestCheckResourceAttr(resourceMetal, "maximum_network_interfaces", "15"),
+					resource.TestCheckResourceAttr(resourceMetal, "memory_size", "786432"),
+					resource.TestCheckResourceAttr(resourceMetal, "network_performance", "100 Gigabit"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_architectures.#", "1"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_architectures.0", "x86_64"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_placement_strategies.#", "3"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_placement_strategies.0", "cluster"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_placement_strategies.1", "partition"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_placement_strategies.2", "spread"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_root_device_types.#", "1"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_root_device_types.0", "ebs"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_usages_classes.#", "2"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_usages_classes.0", "on-demand"),
+					resource.TestCheckResourceAttr(resourceMetal, "supported_usages_classes.1", "spot"),
+					resource.TestCheckResourceAttr(resourceMetal, "sustained_clock_speed", "3.1"),
+					resource.TestCheckResourceAttr(resourceMetal, "total_instance_storage", "60000"),
+					resource.TestCheckResourceAttr(resourceMetal, "instance_disks.#", "1"),
+					resource.TestCheckResourceAttr(resourceMetal, "instance_disks.0.count", "8"),
+					resource.TestCheckResourceAttr(resourceMetal, "instance_disks.0.size", "7500"),
+					resource.TestCheckResourceAttr(resourceMetal, "instance_disks.0.type", "ssd"),
+					resource.TestCheckResourceAttr(resourceGpu, "total_gpu_memory", "4096"),
+					resource.TestCheckResourceAttr(resourceGpu, "hypervisor", "xen"),
+					resource.TestCheckResourceAttr(resourceGpu, "gpus.#", "1"),
+					resource.TestCheckResourceAttr(resourceGpu, "gpus.0.count", "1"),
+					resource.TestCheckResourceAttr(resourceGpu, "gpus.0.memory_size", "4096"),
+					resource.TestCheckResourceAttr(resourceGpu, "gpus.0.manufacturer", "NVIDIA"),
+					resource.TestCheckResourceAttr(resourceGpu, "gpus.0.name", "K520"),
+					resource.TestCheckResourceAttr(resourceGpu, "valid_threads_per_core.#", "2"),
+					resource.TestCheckResourceAttr(resourceGpu, "valid_threads_per_core.0", "1"),
+					resource.TestCheckResourceAttr(resourceGpu, "valid_threads_per_core.1", "2"),
+					resource.TestCheckResourceAttr(resourceGpu, "default_threads_per_core", "2"),
+					resource.TestCheckResourceAttr(resourceGpu, "default_cores", "4"),
+					resource.TestCheckResourceAttr(resourceGpu, "default_vcpus", "8"),
+					resource.TestCheckResourceAttr(resourceFpga, "fpgas.#", "1"),
+					resource.TestCheckResourceAttr(resourceFpga, "fpgas.0.name", "Virtex UltraScale (VU9P)"),
+					resource.TestCheckResourceAttr(resourceFpga, "fpgas.0.manufacturer", "Xilinx"),
+					resource.TestCheckResourceAttr(resourceFpga, "fpgas.0.count", "1"),
+					resource.TestCheckResourceAttr(resourceFpga, "fpgas.0.memory_size", "65536"),
+					resource.TestCheckResourceAttr(resourceFpga, "total_fpga_memory", "65536"),
+					resource.TestCheckResourceAttr(resourceAccelerator, "accelerators.#", "1"),
+					resource.TestCheckResourceAttr(resourceAccelerator, "accelerators.0.count", "1"),
+					resource.TestCheckResourceAttr(resourceAccelerator, "accelerators.0.name", "Inferentia"),
+					resource.TestCheckResourceAttr(resourceAccelerator, "accelerators.0.manufacturer", "AWS"),
+					resource.TestCheckResourceAttr(resourceAccelerator, "valid_cores.#", "1"),
+					resource.TestCheckResourceAttr(resourceAccelerator, "valid_cores.0", "2"),
+				),
+			},
+		},
+	})
+}
+
+const testAccDataSourceEc2InstanceType = `
+data "aws_ec2_instance_type" "metal" {
+  instance_type = "i3en.metal"
+}
+data "aws_ec2_instance_type" "gpu" {
+  instance_type = "g2.2xlarge"
+}
+data "aws_ec2_instance_type" "fpga" {
+  instance_type = "f1.2xlarge"
+}
+data "aws_ec2_instance_type" "accelerator" {
+  instance_type = "inf1.xlarge"
+}
+`
diff --git a/aws/provider.go b/aws/provider.go
index 9b0ebc125c9..17ef69b6b80 100644
--- a/aws/provider.go
+++ b/aws/provider.go
@@ -214,6 +214,7 @@ func Provider() *schema.Provider {
 			"aws_ebs_volumes":                  dataSourceAwsEbsVolumes(),
 			"aws_ec2_coip_pool":                dataSourceAwsEc2CoipPool(),
 			"aws_ec2_coip_pools":               dataSourceAwsEc2CoipPools(),
+			"aws_ec2_instance_type":            dataSourceAwsEc2InstanceType(),
 			"aws_ec2_instance_type_offering":   dataSourceAwsEc2InstanceTypeOffering(),
 			"aws_ec2_instance_type_offerings":  dataSourceAwsEc2InstanceTypeOfferings(),
 			"aws_ec2_local_gateway":            dataSourceAwsEc2LocalGateway(),
diff --git a/website/docs/d/ec2_instance_type.html.markdown b/website/docs/d/ec2_instance_type.html.markdown
new file mode 100644
index 00000000000..932f6b11a09
--- /dev/null
+++ b/website/docs/d/ec2_instance_type.html.markdown
@@ -0,0 +1,83 @@
+---
+subcategory: "EC2"
+layout: "aws"
+page_title: "AWS: aws_ec2_instance_type"
+description: |-
+  Information about a single EC2 Instance Type.
+---
+
+# Data Source: aws_ec2_instance_type
+
+Get characteristics for a single EC2 Instance Type.
+
+## Example Usage
+
+```hcl
+data "aws_ec2_instance_type" "example" {
+  instance_type = "t2.micro"
+}
+```
+
+## Argument Reference
+
+The following argument is supported:
+
+* `instance_type` - (Required) The instance type, e.g. `t2.micro`.
+
+## Attribute Reference
+
+In addition to the argument above, the following attributes are exported:
+
+~> **NOTE:** Not all attributes are set for every instance type.
+
+* `accelerators` - Describes the Inference accelerators for the instance type.
+    * `accelerators.#.count` - The number of Inference accelerators for the instance type.
+    * `accelerators.#.manufacturer` - The manufacturer of the Inference accelerator.
+    * `accelerators.#.name` - The name of the Inference accelerator.
+* `auto_recovery_supported` - `true` if auto recovery is supported.
+* `bare_metal` - `true` if it is a bare metal instance type.
+* `burstable_performance_supported` - `true` if the instance type is a burstable performance instance type.
+* `current_generation` - `true` if the instance type is a current generation.
+* `default_cores` - The default number of cores for the instance type.
+* `default_threads_per_core` - The default number of threads per core for the instance type.
+* `default_vcpus` - The default number of vCPUs for the instance type.
+* `dedicated_hosts_supported` - `true` if Dedicated Hosts are supported on the instance type.
+* `ebs_encryption_support` - Indicates whether Amazon EBS encryption is supported.
+* `ebs_optimized_support` - Indicates whether the instance type is Amazon EBS-optimized.
+* `ena_support` - Indicates whether Elastic Network Adapter (ENA) is supported.
+* `fpgas` - Describes the FPGA accelerator settings for the instance type.
+    * `fpgas.#.count` - The count of FPGA accelerators for the instance type.
+    * `fpgas.#.manufacturer` - The manufacturer of the FPGA accelerator.
+    * `fpgas.#.memory_size` - The size (in MiB) of the memory available to the FPGA accelerator.
+    * `fpgas.#.name` - The name of the FPGA accelerator.
+* `free_tier_eligible` - `true` if the instance type is eligible for the free tier.
+* `gpus` - Describes the GPU accelerators for the instance type.
+    * `gpus.#.count` - The number of GPUs for the instance type.
+    * `gpus.#.manufacturer` - The manufacturer of the GPU accelerator.
+    * `gpus.#.memory_size` - The size (in MiB) of the memory available to the GPU accelerator.
+    * `gpus.#.name` - The name of the GPU accelerator.
+* `hibernation_supported` - `true` if On-Demand hibernation is supported.
+* `hypervisor` - Indicates the hypervisor used for the instance type.
+* `ipv6_supported` - `true` if IPv6 is supported.
+* `instance_disks` - Describes the disks for the instance type.
+    * `instance_disks.#.count` - The number of disks with this configuration.
+    * `instance_disks.#.size` - The size of the disk in GB.
+    * `instance_disks.#.type` - The type of disk.
+* `instance_storage_supported` - `true` if instance storage is supported.
+* `maximum_ipv4_addresses_per_interface` - The maximum number of IPv4 addresses per network interface.
+* `maximum_ipv6_addresses_per_interface` - The maximum number of IPv6 addresses per network interface.
+* `maximum_network_interfaces` - The maximum number of network interfaces for the instance type.
+* `memory_size` - Size of the instance memory, in MiB.
+* `network_performance` - Describes the network performance.
+* `supported_architectures` - A list of architectures supported by the instance type (see the example after this list).
+* `supported_placement_strategies` - A list of supported placement group strategies.
+* `supported_root_device_types` - Indicates the supported root device types.
+* `supported_usages_classes` - Indicates whether the instance type is offered for Spot or On-Demand usage.
+* `sustained_clock_speed` - The speed of the processor, in GHz.
+* `total_fpga_memory` - The total memory of all FPGA accelerators for the instance type (in MiB).
+* `total_gpu_memory` - The total size of the memory for the GPU accelerators for the instance type (in MiB).
+* `total_instance_storage` - The total size of the instance disks, in GB.
+* `valid_cores` - List of the valid number of cores that can be configured for the instance type.
+* `valid_threads_per_core` - List of the valid number of threads per core that can be configured for the instance type.
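+
+As a further illustration (the data source names and the chosen filter are only an example), the exported `supported_architectures` list from the `example` data source above can be used to restrict an `aws_ami` lookup to images the instance type can actually run:
+
+```hcl
+data "aws_ami" "compatible" {
+  most_recent = true
+  owners      = ["amazon"]
+
+  filter {
+    name   = "architecture"
+    values = data.aws_ec2_instance_type.example.supported_architectures
+  }
+}
+```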