resource/aws_elasticache_cluster: Add replication_group_id argument #3869

Merged (7 commits, Mar 26, 2018)
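
This PR allows an aws_elasticache_cluster to join an existing replication group as a read replica via a new replication_group_id argument. When the argument is set, every other cluster-level attribute is inherited from the replication group, so they all conflict with it. A minimal usage sketch, adapted from the acceptance-test configuration in this PR (resource names and IDs are illustrative):

resource "aws_elasticache_replication_group" "example" {
  replication_group_description = "example replication group"
  replication_group_id          = "tf-example"
  node_type                     = "cache.m3.medium"
  number_cache_clusters         = 1
  parameter_group_name          = "default.redis3.2"
  port                          = 6379
}

# The replica inherits engine, node_type, port, and the other
# cluster-level settings from the replication group.
resource "aws_elasticache_cluster" "replica" {
  cluster_id           = "tf-example-replica"
  replication_group_id = "${aws_elasticache_replication_group.example.id}"
}
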
115 changes: 77 additions & 38 deletions aws/resource_aws_elasticache_cluster.go
@@ -30,11 +30,14 @@ func resourceAwsElastiCacheCommonSchema() map[string]*schema.Schema {
},
"node_type": {
Type: schema.TypeString,
Required: true,
Optional: true,
Computed: true,
},
"engine": {
Type: schema.TypeString,
Required: true,
Optional: true,
// Computed: true is set in resourceAwsElasticacheCluster, because this schema is shared with resource_aws_elasticache_replication_group, which supplies a default value.
ForceNew: true,
},
"engine_version": {
Type: schema.TypeString,
@@ -139,6 +142,8 @@ func resourceAwsElastiCacheCommonSchema() map[string]*schema.Schema {
func resourceAwsElasticacheCluster() *schema.Resource {
resourceSchema := resourceAwsElastiCacheCommonSchema()

resourceSchema["engine"].Computed = true

resourceSchema["cluster_id"] = &schema.Schema{
Type: schema.TypeString,
Required: true,
@@ -154,7 +159,8 @@ func resourceAwsElasticacheCluster() *schema.Resource {

resourceSchema["num_cache_nodes"] = &schema.Schema{
Type: schema.TypeInt,
Required: true,
Optional: true,
Computed: true,
}

resourceSchema["az_mode"] = &schema.Schema{
@@ -184,11 +190,6 @@ func resourceAwsElasticacheCluster() *schema.Resource {
Computed: true,
}

resourceSchema["replication_group_id"] = &schema.Schema{
Type: schema.TypeString,
Computed: true,
}

resourceSchema["cache_nodes"] = &schema.Schema{
Type: schema.TypeList,
Computed: true,
@@ -214,6 +215,33 @@ func resourceAwsElasticacheCluster() *schema.Resource {
},
}

resourceSchema["replication_group_id"] = &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ConflictsWith: []string{
"availability_zone",
"availability_zones",
"az_mode",
"engine_version",
"engine",
"maintenance_window",
"node_type",
"notification_topic_arn",
"num_cache_nodes",
"parameter_group_name",
"port",
"security_group_ids",
"security_group_names",
"snapshot_arns",
"snapshot_name",
"snapshot_retention_limit",
"snapshot_window",
"subnet_group_name",
},
Computed: true,
}

return &schema.Resource{
Create: resourceAwsElasticacheClusterCreate,
Read: resourceAwsElasticacheClusterRead,
@@ -309,40 +337,55 @@ func resourceAwsElasticacheCluster() *schema.Resource {
func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).elasticacheconn

clusterId := d.Get("cluster_id").(string)
nodeType := d.Get("node_type").(string) // e.g) cache.m1.small
numNodes := int64(d.Get("num_cache_nodes").(int)) // 2
engine := d.Get("engine").(string) // memcached
engineVersion := d.Get("engine_version").(string) // 1.4.14
subnetGroupName := d.Get("subnet_group_name").(string)
securityNameSet := d.Get("security_group_names").(*schema.Set)
securityIdSet := d.Get("security_group_ids").(*schema.Set)

securityNames := expandStringList(securityNameSet.List())
securityIds := expandStringList(securityIdSet.List())
tags := tagsFromMapEC(d.Get("tags").(map[string]interface{}))

req := &elasticache.CreateCacheClusterInput{
CacheClusterId: aws.String(clusterId),
CacheNodeType: aws.String(nodeType),
NumCacheNodes: aws.Int64(numNodes),
Engine: aws.String(engine),
EngineVersion: aws.String(engineVersion),
CacheSubnetGroupName: aws.String(subnetGroupName),
CacheSecurityGroupNames: securityNames,
SecurityGroupIds: securityIds,
Tags: tags,
req := &elasticache.CreateCacheClusterInput{}

if v, ok := d.GetOk("replication_group_id"); ok {
req.ReplicationGroupId = aws.String(v.(string))
} else {
securityNameSet := d.Get("security_group_names").(*schema.Set)
securityIdSet := d.Get("security_group_ids").(*schema.Set)
securityNames := expandStringList(securityNameSet.List())
securityIds := expandStringList(securityIdSet.List())
tags := tagsFromMapEC(d.Get("tags").(map[string]interface{}))

req.CacheSecurityGroupNames = securityNames
req.SecurityGroupIds = securityIds
req.Tags = tags
}

// parameter groups are optional and can be defaulted by AWS
if v, ok := d.GetOk("parameter_group_name"); ok {
req.CacheParameterGroupName = aws.String(v.(string))
if v, ok := d.GetOk("cluster_id"); ok {
req.CacheClusterId = aws.String(v.(string))
}

if v, ok := d.GetOk("node_type"); ok {
req.CacheNodeType = aws.String(v.(string))
}

if v, ok := d.GetOk("num_cache_nodes"); ok {
req.NumCacheNodes = aws.Int64(int64(v.(int)))
}

if v, ok := d.GetOk("engine"); ok {
req.Engine = aws.String(v.(string))
}

if v, ok := d.GetOk("engine_version"); ok {
req.EngineVersion = aws.String(v.(string))
}

if v, ok := d.GetOk("port"); ok {
req.Port = aws.Int64(int64(v.(int)))
}

if v, ok := d.GetOk("subnet_group_name"); ok {
req.CacheSubnetGroupName = aws.String(v.(string))
}

// parameter groups are optional and can be defaulted by AWS
if v, ok := d.GetOk("parameter_group_name"); ok {
req.CacheParameterGroupName = aws.String(v.(string))
}

if v, ok := d.GetOk("snapshot_retention_limit"); ok {
req.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
}
@@ -384,10 +427,6 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
req.PreferredAvailabilityZones = azs
}

if v, ok := d.GetOk("replication_group_id"); ok {
req.ReplicationGroupId = aws.String(v.(string))
}

resp, err := conn.CreateCacheCluster(req)
if err != nil {
return fmt.Errorf("Error creating Elasticache: %s", err)
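Because replication_group_id conflicts with every other cluster-level attribute in the schema above, setting both fails at plan time rather than being rejected by the AWS API. An illustrative invalid configuration (IDs are made up), matching the error the acceptance tests below assert on:

resource "aws_elasticache_cluster" "replica" {
  cluster_id           = "tf-example-replica"
  replication_group_id = "non-existent-id"

  # Fails validation: "replication_group_id": conflicts with engine
  engine = "redis"
}
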
207 changes: 207 additions & 0 deletions aws/resource_aws_elasticache_cluster_test.go
@@ -519,6 +519,165 @@ func TestAccAWSElasticacheCluster_NumCacheNodes_Redis_Ec2Classic(t *testing.T) {
})
}

func TestAccAWSElasticacheCluster_ReplicationGroupID_InvalidAttributes(t *testing.T) {
oldvar := os.Getenv("AWS_DEFAULT_REGION")
os.Setenv("AWS_DEFAULT_REGION", "us-east-1")
defer os.Setenv("AWS_DEFAULT_REGION", oldvar)

rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(8))

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccEC2ClassicPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "availability_zone", "us-east-1a"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with availability_zone`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "availability_zones", "${list(\"us-east-1a\", \"us-east-1c\")}"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with availability_zones`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "az_mode", "single-az"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with az_mode`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "engine_version", "3.2.10"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with engine_version`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "engine", "redis"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with engine`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "maintenance_window", "sun:05:00-sun:09:00"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with maintenance_window`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "node_type", "cache.m3.medium"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with node_type`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "notification_topic_arn", "arn:aws:sns:us-east-1:123456789012:topic/non-existent"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with notification_topic_arn`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "num_cache_nodes", "1"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with num_cache_nodes`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "parameter_group_name", "non-existent"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with parameter_group_name`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "port", "6379"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with port`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "security_group_ids", "${list(\"sg-12345678\", \"sg-87654321\")}"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with security_group_ids`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "security_group_names", "${list(\"group1\", \"group2\")}"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with security_group_names`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "snapshot_arns", "${list(\"arn:aws:s3:::my_bucket/snapshot1.rdb\")}"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with snapshot_arns`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "snapshot_name", "arn:aws:s3:::my_bucket/snapshot1.rdb"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with snapshot_name`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "snapshot_retention_limit", "0"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with snapshot_retention_limit`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "snapshot_window", "05:00-09:00"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with snapshot_window`),
},
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, "subnet_group_name", "group1"),
ExpectError: regexp.MustCompile(`"replication_group_id": conflicts with subnet_group_name`),
},
},
})
}

func TestAccAWSElasticacheCluster_ReplicationGroupID_SingleReplica_Ec2Classic(t *testing.T) {
oldvar := os.Getenv("AWS_DEFAULT_REGION")
os.Setenv("AWS_DEFAULT_REGION", "us-east-1")
defer os.Setenv("AWS_DEFAULT_REGION", oldvar)

var cluster elasticache.CacheCluster
var replicationGroup elasticache.ReplicationGroup
rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(7))
clusterResourceName := "aws_elasticache_cluster.replica"
replicationGroupResourceName := "aws_elasticache_replication_group.test"

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccEC2ClassicPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_Replica_Ec2Classic(rName, 1),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheReplicationGroupExists(replicationGroupResourceName, &replicationGroup),
testAccCheckAWSElasticacheClusterExists(clusterResourceName, &cluster),
testAccCheckAWSElasticacheClusterReplicationGroupIDAttribute(&cluster, &replicationGroup),
resource.TestCheckResourceAttr(clusterResourceName, "engine", "redis"),
resource.TestCheckResourceAttr(clusterResourceName, "node_type", "cache.m3.medium"),
resource.TestCheckResourceAttr(clusterResourceName, "parameter_group_name", "default.redis3.2"),
resource.TestCheckResourceAttr(clusterResourceName, "port", "6379"),
),
},
},
})
}

func TestAccAWSElasticacheCluster_ReplicationGroupID_MultipleReplica_Ec2Classic(t *testing.T) {
oldvar := os.Getenv("AWS_DEFAULT_REGION")
os.Setenv("AWS_DEFAULT_REGION", "us-east-1")
defer os.Setenv("AWS_DEFAULT_REGION", oldvar)

var cluster1, cluster2 elasticache.CacheCluster
var replicationGroup elasticache.ReplicationGroup
rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(7))
clusterResourceName1 := "aws_elasticache_cluster.replica.0"
clusterResourceName2 := "aws_elasticache_cluster.replica.1"
replicationGroupResourceName := "aws_elasticache_replication_group.test"

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t); testAccEC2ClassicPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSElasticacheClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSElasticacheClusterConfig_ReplicationGroupID_Replica_Ec2Classic(rName, 2),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSElasticacheReplicationGroupExists(replicationGroupResourceName, &replicationGroup),
testAccCheckAWSElasticacheClusterExists(clusterResourceName1, &cluster1),
testAccCheckAWSElasticacheClusterExists(clusterResourceName2, &cluster2),
testAccCheckAWSElasticacheClusterReplicationGroupIDAttribute(&cluster1, &replicationGroup),
testAccCheckAWSElasticacheClusterReplicationGroupIDAttribute(&cluster2, &replicationGroup),
resource.TestCheckResourceAttr(clusterResourceName1, "engine", "redis"),
resource.TestCheckResourceAttr(clusterResourceName1, "node_type", "cache.m3.medium"),
resource.TestCheckResourceAttr(clusterResourceName1, "parameter_group_name", "default.redis3.2"),
resource.TestCheckResourceAttr(clusterResourceName1, "port", "6379"),
resource.TestCheckResourceAttr(clusterResourceName2, "engine", "redis"),
resource.TestCheckResourceAttr(clusterResourceName2, "node_type", "cache.m3.medium"),
resource.TestCheckResourceAttr(clusterResourceName2, "parameter_group_name", "default.redis3.2"),
resource.TestCheckResourceAttr(clusterResourceName2, "port", "6379"),
),
},
},
})
}

func testAccCheckAWSElasticacheClusterAttributes(v *elasticache.CacheCluster) resource.TestCheckFunc {
return func(s *terraform.State) error {
if v.NotificationConfiguration == nil {
@@ -533,6 +692,20 @@ func testAccCheckAWSElasticacheClusterAttributes(v *elasticache.CacheCluster) resource.TestCheckFunc {
}
}

func testAccCheckAWSElasticacheClusterReplicationGroupIDAttribute(cluster *elasticache.CacheCluster, replicationGroup *elasticache.ReplicationGroup) resource.TestCheckFunc {
return func(s *terraform.State) error {
if cluster.ReplicationGroupId == nil {
return errors.New("expected cluster ReplicationGroupId to be set")
}

if aws.StringValue(cluster.ReplicationGroupId) != aws.StringValue(replicationGroup.ReplicationGroupId) {
return errors.New("expected cluster ReplicationGroupId to equal replication group ID")
}

return nil
}
}

func testAccCheckAWSElasticacheClusterNotRecreated(i, j *elasticache.CacheCluster) resource.TestCheckFunc {
return func(s *terraform.State) error {
if aws.TimeValue(i.CacheClusterCreateTime) != aws.TimeValue(j.CacheClusterCreateTime) {
@@ -1035,3 +1208,37 @@ resource "aws_elasticache_cluster" "bar" {
}
`, rName, numCacheNodes)
}

func testAccAWSElasticacheClusterConfig_ReplicationGroupID_InvalidAttribute(rName, attrName, attrValue string) string {
return fmt.Sprintf(`
resource "aws_elasticache_cluster" "replica" {
cluster_id = "%[1]s"
replication_group_id = "non-existent-id"
%[2]s = "%[3]s"
}
`, rName, attrName, attrValue)
}

func testAccAWSElasticacheClusterConfig_ReplicationGroupID_Replica_Ec2Classic(rName string, count int) string {
return fmt.Sprintf(`
resource "aws_elasticache_replication_group" "test" {
replication_group_description = "Terraform Acceptance Testing"
replication_group_id = "%[1]s"
node_type = "cache.m3.medium"
number_cache_clusters = 1
parameter_group_name = "default.redis3.2"
port = 6379

lifecycle {
ignore_changes = ["number_cache_clusters"]
}
}

resource "aws_elasticache_cluster" "replica" {
count = %[2]d

cluster_id = "%[1]s${count.index}"
replication_group_id = "${aws_elasticache_replication_group.test.id}"
}
`, rName, count)
}