diff --git a/aws/provider.go b/aws/provider.go
index 34e5c292584..550b235a5dd 100644
--- a/aws/provider.go
+++ b/aws/provider.go
@@ -601,6 +601,7 @@ func Provider() *schema.Provider {
 			"aws_elb_attachment":              resourceAwsElbAttachment(),
 			"aws_emr_cluster":                 resourceAwsEMRCluster(),
 			"aws_emr_instance_group":          resourceAwsEMRInstanceGroup(),
+			"aws_emr_managed_scaling_policy":  resourceAwsEMRManagedScalingPolicy(),
 			"aws_emr_security_configuration":  resourceAwsEMRSecurityConfiguration(),
 			"aws_flow_log":                    resourceAwsFlowLog(),
 			"aws_fsx_lustre_file_system":      resourceAwsFsxLustreFileSystem(),
diff --git a/aws/resource_aws_emr_managed_scaling_policy.go b/aws/resource_aws_emr_managed_scaling_policy.go
new file mode 100644
index 00000000000..9df131a4cd1
--- /dev/null
+++ b/aws/resource_aws_emr_managed_scaling_policy.go
@@ -0,0 +1,143 @@
+package aws
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/emr"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+)
+
+func resourceAwsEMRManagedScalingPolicy() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsEMRManagedScalingPolicyCreate,
+		Read:   resourceAwsEMRManagedScalingPolicyRead,
+		Delete: resourceAwsEMRManagedScalingPolicyDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Schema: map[string]*schema.Schema{
+			"cluster_id": {
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+			"compute_limits": {
+				Type:     schema.TypeSet,
+				Required: true,
+				ForceNew: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"unit_type": {
+							Type:         schema.TypeString,
+							Required:     true,
+							ForceNew:     true,
+							ValidateFunc: validation.StringInSlice(emr.ComputeLimitsUnitType_Values(), false),
+						},
+						"minimum_capacity_units": {
+							Type:     schema.TypeInt,
+							Required: true,
+							ForceNew: true,
+						},
+						"maximum_capacity_units": {
+							Type:     schema.TypeInt,
+							Required: true,
+							ForceNew: true,
+						},
+						"maximum_core_capacity_units": {
+							Type:     schema.TypeInt,
+							Optional: true,
+							ForceNew: true,
+						},
+						"maximum_ondemand_capacity_units": {
+							Type:     schema.TypeInt,
+							Optional: true,
+							ForceNew: true,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func resourceAwsEMRManagedScalingPolicyCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).emrconn
+
+	if l := d.Get("compute_limits").(*schema.Set).List(); len(l) > 0 && l[0] != nil {
+		cl := l[0].(map[string]interface{})
+		computeLimits := &emr.ComputeLimits{
+			UnitType:             aws.String(cl["unit_type"].(string)),
+			MinimumCapacityUnits: aws.Int64(int64(cl["minimum_capacity_units"].(int))),
+			MaximumCapacityUnits: aws.Int64(int64(cl["maximum_capacity_units"].(int))),
+		}
+		if v, ok := cl["maximum_core_capacity_units"].(int); ok && v > 0 {
+			computeLimits.MaximumCoreCapacityUnits = aws.Int64(int64(v))
+		}
+		if v, ok := cl["maximum_ondemand_capacity_units"].(int); ok && v > 0 {
+			computeLimits.MaximumOnDemandCapacityUnits = aws.Int64(int64(v))
+		}
+		managedScalingPolicy := &emr.ManagedScalingPolicy{
+			ComputeLimits: computeLimits,
+		}
+
+		_, err := conn.PutManagedScalingPolicy(&emr.PutManagedScalingPolicyInput{
+			ClusterId:            aws.String(d.Get("cluster_id").(string)),
+			ManagedScalingPolicy: managedScalingPolicy,
+		})
+
+		if err != nil {
+			log.Printf("[ERROR] EMR.PutManagedScalingPolicy %s", err)
+			return fmt.Errorf("error putting EMR Managed Scaling Policy: %w", err)
+		}
+	}
+
+	d.SetId(d.Get("cluster_id").(string))
+	return nil
+}
+
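+// resourceAwsEMRManagedScalingPolicyRead reads the policy with GetManagedScalingPolicy,
+// using the cluster ID stored as the resource ID, and removes the resource from state
+// when the cluster or its policy no longer exists.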
+func resourceAwsEMRManagedScalingPolicyRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).emrconn
+	resp, err := conn.GetManagedScalingPolicy(&emr.GetManagedScalingPolicyInput{
+		ClusterId: aws.String(d.Id()),
+	})
+	if err != nil {
+		if isAWSErr(err, "InvalidRequestException", "does not exist") {
+			log.Printf("[WARN] EMR Managed Scaling Policy (%s) not found, removing from state", d.Get("cluster_id").(string))
+			d.SetId("")
+			return nil
+		}
+		return fmt.Errorf("error getting EMR Managed Scaling Policy (%s): %w", d.Id(), err)
+	}
+
+	if resp.ManagedScalingPolicy != nil {
+		attrs := make(map[string]interface{})
+		attrs["unit_type"] = aws.StringValue(resp.ManagedScalingPolicy.ComputeLimits.UnitType)
+		attrs["minimum_capacity_units"] = aws.Int64Value(resp.ManagedScalingPolicy.ComputeLimits.MinimumCapacityUnits)
+		attrs["maximum_capacity_units"] = aws.Int64Value(resp.ManagedScalingPolicy.ComputeLimits.MaximumCapacityUnits)
+		attrs["maximum_core_capacity_units"] = aws.Int64Value(resp.ManagedScalingPolicy.ComputeLimits.MaximumCoreCapacityUnits)
+		attrs["maximum_ondemand_capacity_units"] = aws.Int64Value(resp.ManagedScalingPolicy.ComputeLimits.MaximumOnDemandCapacityUnits)
+
+		computeLimits := make([]interface{}, 0)
+		computeLimits = append(computeLimits, attrs)
+		d.Set("compute_limits", computeLimits)
+	}
+
+	return nil
+}
+
+func resourceAwsEMRManagedScalingPolicyDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).emrconn
+	_, err := conn.RemoveManagedScalingPolicy(&emr.RemoveManagedScalingPolicyInput{
+		ClusterId: aws.String(d.Get("cluster_id").(string)),
+	})
+	if err != nil {
+		if isAWSErr(err, "InvalidRequestException", "does not exist") {
+			return nil
+		}
+		return fmt.Errorf("error removing EMR Managed Scaling Policy (%s): %w", d.Id(), err)
+	}
+	return nil
+}
diff --git a/aws/resource_aws_emr_managed_scaling_policy_test.go b/aws/resource_aws_emr_managed_scaling_policy_test.go
new file mode 100644
index 00000000000..03a8b5a709a
--- /dev/null
+++ b/aws/resource_aws_emr_managed_scaling_policy_test.go
@@ -0,0 +1,471 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/emr"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+)
+
+func TestAccAwsEmrManagedScalingPolicy_basic(t *testing.T) {
+	resourceName := "aws_emr_managed_scaling_policy.testpolicy"
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSEmrManagedScalingPolicyDestroy,
+
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSEmrManagedScalingPolicy_basic(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEmrManagedScalingPolicyExists(resourceName),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumCoreCapacityUnits(t *testing.T) {
+	resourceName := "aws_emr_managed_scaling_policy.testpolicy"
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSEmrManagedScalingPolicyDestroy,
+
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSEmrManagedScalingPolicy_ComputeLimits_MaximumCoreCapacityUnits(rName, 2),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEmrManagedScalingPolicyExists(resourceName),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccAwsEmrManagedScalingPolicy_ComputeLimits_MaximumOndemandCapacityUnits(t *testing.T) {
+	resourceName := "aws_emr_managed_scaling_policy.testpolicy"
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSEmrManagedScalingPolicyDestroy,
+
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSEmrManagedScalingPolicy_ComputeLimits_MaximumOndemandCapacityUnits(rName, 2),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEmrManagedScalingPolicyExists(resourceName),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+func TestAccAwsEmrManagedScalingPolicy_disappears(t *testing.T) {
+	resourceName := "aws_emr_managed_scaling_policy.testpolicy"
+	rName := acctest.RandomWithPrefix("tf-acc-test")
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t) },
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckAWSEmrManagedScalingPolicyDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAWSEmrManagedScalingPolicy_basic(rName),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSEmrManagedScalingPolicyExists(resourceName),
+					testAccCheckResourceDisappears(testAccProvider, resourceAwsEMRManagedScalingPolicy(), resourceName),
+				),
+				ExpectNonEmptyPlan: true,
+			},
+		},
+	})
+}
+
+func testAccAWSEmrManagedScalingPolicy_basic(r string) string {
+	return fmt.Sprintf(testAccAWSEmrManagedScalingPolicyBase+`
+resource "aws_emr_managed_scaling_policy" "testpolicy" {
+  cluster_id = aws_emr_cluster.test.id
+  compute_limits {
+    unit_type              = "Instances"
+    minimum_capacity_units = 1
+    maximum_capacity_units = 2
+  }
+}
+`, r)
+}
+
+func testAccAWSEmrManagedScalingPolicy_ComputeLimits_MaximumCoreCapacityUnits(r string, maximumCoreCapacityUnits int) string {
+	return fmt.Sprintf(testAccAWSEmrManagedScalingPolicyBase+`
+resource "aws_emr_managed_scaling_policy" "testpolicy" {
+  cluster_id = aws_emr_cluster.test.id
+  compute_limits {
+    unit_type                   = "Instances"
+    minimum_capacity_units      = 1
+    maximum_capacity_units      = 2
+    maximum_core_capacity_units = %[2]d
+  }
+}
+`, r, maximumCoreCapacityUnits)
+}
+
+func testAccAWSEmrManagedScalingPolicy_ComputeLimits_MaximumOndemandCapacityUnits(r string, maximumOndemandCapacityUnits int) string {
+	return fmt.Sprintf(testAccAWSEmrManagedScalingPolicyBase+`
+resource "aws_emr_managed_scaling_policy" "testpolicy" {
+  cluster_id = aws_emr_cluster.test.id
+  compute_limits {
+    unit_type                       = "Instances"
+    minimum_capacity_units          = 1
+    maximum_capacity_units          = 2
+    maximum_ondemand_capacity_units = %[2]d
+  }
+}
+`, r, maximumOndemandCapacityUnits)
+}
+
+func testAccCheckAWSEmrManagedScalingPolicyExists(n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("No EMR Managed Scaling Policy ID is set")
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).emrconn
+		resp, err := conn.GetManagedScalingPolicy(&emr.GetManagedScalingPolicyInput{
+			ClusterId: aws.String(rs.Primary.ID),
+		})
+		if err != nil {
+			return err
+		}
+
+		if resp.ManagedScalingPolicy == nil {
+			return fmt.Errorf("EMR Managed Scaling Policy is empty which shouldn't happen")
+		}
+		return nil
+	}
+}
+
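+// testAccCheckAWSEmrManagedScalingPolicyDestroy verifies that no managed scaling policy
+// is still attached to the test cluster once the resource has been destroyed.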
+func testAccCheckAWSEmrManagedScalingPolicyDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).emrconn
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_emr_managed_scaling_policy" {
+			continue
+		}
+
+		resp, err := conn.GetManagedScalingPolicy(&emr.GetManagedScalingPolicyInput{
+			ClusterId: aws.String(rs.Primary.ID),
+		})
+
+		if isAWSErr(err, "InvalidRequestException", "does not exist") {
+			return nil
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if resp != nil {
+			return fmt.Errorf("Error: EMR Managed Scaling Policy still exists")
+		}
+
+		return nil
+	}
+
+	return nil
+}
+
+const testAccAWSEmrManagedScalingPolicyBase = `
+data "aws_availability_zones" "available" {
+  # Many instance types are not available in this availability zone
+  exclude_zone_ids = ["usw2-az4"]
+  state            = "available"
+
+  filter {
+    name   = "opt-in-status"
+    values = ["opt-in-not-required"]
+  }
+}
+
+resource "aws_vpc" "test" {
+  cidr_block           = "10.0.0.0/16"
+  enable_dns_hostnames = true
+
+  tags = {
+    Name = "tf-acc-test-emr-cluster"
+  }
+}
+
+resource "aws_internet_gateway" "test" {
+  vpc_id = aws_vpc.test.id
+
+  tags = {
+    Name = "tf-acc-test-emr-cluster"
+  }
+}
+
+resource "aws_security_group" "test" {
+  vpc_id = aws_vpc.test.id
+
+  ingress {
+    from_port = 0
+    protocol  = "-1"
+    self      = true
+    to_port   = 0
+  }
+
+  egress {
+    cidr_blocks = ["0.0.0.0/0"]
+    from_port   = 0
+    protocol    = "-1"
+    to_port     = 0
+  }
+
+  tags = {
+    Name = "tf-acc-test-emr-cluster"
+  }
+
+  # EMR will modify ingress rules
+  lifecycle {
+    ignore_changes = [ingress]
+  }
+}
+
+resource "aws_subnet" "test" {
+  availability_zone       = data.aws_availability_zones.available.names[0]
+  cidr_block              = "10.0.0.0/24"
+  map_public_ip_on_launch = false
+  vpc_id                  = aws_vpc.test.id
+
+  tags = {
+    Name = "tf-acc-test-emr-cluster"
+  }
+}
+
+resource "aws_route_table" "test" {
+  vpc_id = aws_vpc.test.id
+
+  route {
+    cidr_block = "0.0.0.0/0"
+    gateway_id = aws_internet_gateway.test.id
+  }
+}
+
+resource "aws_route_table_association" "test" {
+  route_table_id = aws_route_table.test.id
+  subnet_id      = aws_subnet.test.id
+}
+
+resource "aws_iam_role" "emr_service" {
+  name = "%[1]s_default_role"
+
+  assume_role_policy = <