From dfb4c4baca668498143d176dd808692bc366c71e Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 19 Oct 2022 11:26:47 -0700 Subject: [PATCH 1/9] Adds read-only node group information --- .../elasticache/global_replication_group.go | 63 ++++++++++++++++++- .../global_replication_group_test.go | 12 ++-- ...che_global_replication_group.html.markdown | 5 ++ 3 files changed, 73 insertions(+), 7 deletions(-) diff --git a/internal/service/elasticache/global_replication_group.go b/internal/service/elasticache/global_replication_group.go index ddd5482e802..b58596de30f 100644 --- a/internal/service/elasticache/global_replication_group.go +++ b/internal/service/elasticache/global_replication_group.go @@ -99,6 +99,22 @@ func ResourceGlobalReplicationGroup() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "global_node_groups": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "global_node_group_id": { + Type: schema.TypeString, + Computed: true, + }, + "slots": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "global_replication_group_id": { Type: schema.TypeString, Computed: true, @@ -136,6 +152,10 @@ func ResourceGlobalReplicationGroup() *schema.Resource { // }, // }, // }, + "num_node_groups": { + Type: schema.TypeInt, + Computed: true, + }, "parameter_group_name": { Type: schema.TypeString, Optional: true, @@ -349,11 +369,14 @@ func resourceGlobalReplicationGroupRead(ctx context.Context, d *schema.ResourceD d.Set("global_replication_group_id", globalReplicationGroup.GlobalReplicationGroupId) d.Set("transit_encryption_enabled", globalReplicationGroup.TransitEncryptionEnabled) - err = setEngineVersionRedis(d, globalReplicationGroup.EngineVersion) - if err != nil { + if err := setEngineVersionRedis(d, globalReplicationGroup.EngineVersion); err != nil { return diag.Errorf("reading ElastiCache Replication Group (%s): %s", d.Id(), err) } + if err := d.Set("global_node_groups", 
flattenGlobalNodeGroups(globalReplicationGroup.GlobalNodeGroups)); err != nil { + return diag.Errorf("setting global_node_groups: %s", err) + } + d.Set("num_node_groups", len(globalReplicationGroup.GlobalNodeGroups)) d.Set("automatic_failover_enabled", flattenGlobalReplicationGroupAutomaticFailoverEnabled(globalReplicationGroup.Members)) d.Set("primary_replication_group_id", flattenGlobalReplicationGroupPrimaryGroupID(globalReplicationGroup.Members)) @@ -519,6 +542,42 @@ func flattenGlobalReplicationGroupAutomaticFailoverEnabled(members []*elasticach return aws.StringValue(member.AutomaticFailover) == elasticache.AutomaticFailoverStatusEnabled } +func flattenGlobalNodeGroups(nodeGroups []*elasticache.GlobalNodeGroup) []any { + if len(nodeGroups) == 0 { + return nil + } + + var l []any + + for _, nodeGroup := range nodeGroups { + if nodeGroup == nil { + continue + } + + l = append(l, flattenGlobalNodeGroup(nodeGroup)) + } + + return l +} + +func flattenGlobalNodeGroup(nodeGroup *elasticache.GlobalNodeGroup) map[string]any { + if nodeGroup == nil { + return nil + } + + m := map[string]interface{}{} + + if v := nodeGroup.GlobalNodeGroupId; v != nil { + m["global_node_group_id"] = aws.StringValue(v) + } + + if v := nodeGroup.Slots; v != nil { + m["slots"] = aws.StringValue(v) + } + + return m +} + func flattenGlobalReplicationGroupPrimaryGroupID(members []*elasticache.GlobalReplicationGroupMember) string { for _, member := range members { if aws.StringValue(member.Role) == GlobalReplicationGroupMemberRolePrimary { diff --git a/internal/service/elasticache/global_replication_group_test.go b/internal/service/elasticache/global_replication_group_test.go index 0f31d085338..a80d2467a7f 100644 --- a/internal/service/elasticache/global_replication_group_test.go +++ b/internal/service/elasticache/global_replication_group_test.go @@ -59,6 +59,8 @@ func TestAccElastiCacheGlobalReplicationGroup_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, 
"global_replication_group_id_suffix", rName), resource.TestMatchResourceAttr(resourceName, "global_replication_group_id", regexp.MustCompile(tfelasticache.GlobalReplicationGroupRegionPrefixFormat+rName)), resource.TestCheckResourceAttr(resourceName, "global_replication_group_description", tfelasticache.EmptyDescription), + resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "0"), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", "0"), resource.TestCheckResourceAttr(resourceName, "primary_replication_group_id", primaryReplicationGroupId), resource.TestCheckResourceAttr(resourceName, "transit_encryption_enabled", "false"), ), @@ -501,7 +503,7 @@ func TestAccElastiCacheGlobalReplicationGroup_ReplaceSecondary_differentRegion(t }) } -func TestAccElastiCacheGlobalReplicationGroup_clusterMode(t *testing.T) { +func TestAccElastiCacheGlobalReplicationGroup_clusterMode_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -527,6 +529,8 @@ func TestAccElastiCacheGlobalReplicationGroup_clusterMode(t *testing.T) { testAccCheckReplicationGroupExists(primaryReplicationGroupResourceName, &primaryReplicationGroup), resource.TestCheckResourceAttrPair(resourceName, "automatic_failover_enabled", primaryReplicationGroupResourceName, "automatic_failover_enabled"), resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttrPair(resourceName, "global_node_groups.#", primaryReplicationGroupResourceName, "num_node_groups"), + resource.TestCheckResourceAttrPair(resourceName, "num_node_groups", primaryReplicationGroupResourceName, "num_node_groups"), ), }, { @@ -1601,10 +1605,8 @@ resource "aws_elasticache_replication_group" "test" { parameter_group_name = "default.redis6.x.cluster.on" automatic_failover_enabled = true - cluster_mode { - num_node_groups = 2 - replicas_per_node_group = 1 - } + num_node_groups = 2 + replicas_per_node_group = 1 } `, rName) } diff --git 
a/website/docs/r/elasticache_global_replication_group.html.markdown b/website/docs/r/elasticache_global_replication_group.html.markdown index 38c30b9b478..23c728927df 100644 --- a/website/docs/r/elasticache_global_replication_group.html.markdown +++ b/website/docs/r/elasticache_global_replication_group.html.markdown @@ -133,6 +133,11 @@ In addition to all arguments above, the following attributes are exported: * `cluster_enabled` - Indicates whether the Global Datastore is cluster enabled. * `engine` - The name of the cache engine to be used for the clusters in this global replication group. * `global_replication_group_id` - The full ID of the global replication group. +* `global_node_groups` - Set of node groups (shards) on the global replication group. + Has the values: + * `global_node_group_id` - The ID of the global node group. + * `slots` - The keyspace for this node group. +* `num_node_groups` - The number of node groups (shards) on the global replication group. * `transit_encryption_enabled` - A flag that indicates whether the encryption in transit is enabled. 
## Import From ee1d5dfb4a25ddfc02ca0d476ffbf6927a3c09d2 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 20 Oct 2022 10:50:18 -0700 Subject: [PATCH 2/9] Updates `num_node_groups` to allow setting on creation --- .../elasticache/global_replication_group.go | 63 ++++++- .../global_replication_group_test.go | 159 ++++++++++++++++++ ...che_global_replication_group.html.markdown | 2 +- 3 files changed, 221 insertions(+), 3 deletions(-) diff --git a/internal/service/elasticache/global_replication_group.go b/internal/service/elasticache/global_replication_group.go index b58596de30f..0c542e43092 100644 --- a/internal/service/elasticache/global_replication_group.go +++ b/internal/service/elasticache/global_replication_group.go @@ -6,6 +6,8 @@ import ( "fmt" "log" "regexp" + "sort" + "strconv" "time" "github.com/aws/aws-sdk-go/aws" @@ -16,6 +18,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -153,8 +156,10 @@ func ResourceGlobalReplicationGroup() *schema.Resource { // }, // }, "num_node_groups": { - Type: schema.TypeInt, - Computed: true, + Type: schema.TypeInt, + Computed: true, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), }, "parameter_group_name": { Type: schema.TypeString, @@ -337,9 +342,63 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc } } + if v, ok := d.GetOk("num_node_groups"); ok { + requested := v.(int) + current := len(globalReplicationGroup.GlobalNodeGroups) + + if requested != current { + if requested > current { + input := &elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + GlobalReplicationGroupId: 
aws.String(d.Id()), + NodeGroupCount: aws.Int64(int64(requested)), + } + _, err := conn.IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + if err != nil { + return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups on creation: %s", d.Id(), err) + } + } else if requested < current { + var ids []string + for _, v := range globalReplicationGroup.GlobalNodeGroups { + ids = append(ids, aws.StringValue(v.GlobalNodeGroupId)) + } + sort.Slice(ids, func(i, j int) bool { + return globalReplicationGroupNodeNumber(ids[i]) < globalReplicationGroupNodeNumber(ids[j]) + }) + ids = ids[:requested] + + input := &elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + GlobalReplicationGroupId: aws.String(d.Id()), + NodeGroupCount: aws.Int64(int64(requested)), + GlobalNodeGroupsToRetain: aws.StringSlice(ids), + } + _, err := conn.DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + if err != nil { + return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups on creation: %s", d.Id(), err) + } + } + + if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), GlobalReplicationGroupDefaultUpdatedTimeout); err != nil { + return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups on creation: waiting for completion: %s", d.Id(), err) + } + } + } + return resourceGlobalReplicationGroupRead(ctx, d, meta) } +func globalReplicationGroupNodeNumber(id string) int { + re := regexp.MustCompile(`^.+-0{0,3}(\d+)$`) + matches := re.FindStringSubmatch(id) + if len(matches) == 2 { + if v, err := strconv.Atoi(matches[1]); err == nil { + return v + } + } + return 0 +} + func resourceGlobalReplicationGroupRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*conns.AWSClient).ElastiCacheConn diff --git a/internal/service/elasticache/global_replication_group_test.go 
b/internal/service/elasticache/global_replication_group_test.go index a80d2467a7f..f4587c9a038 100644 --- a/internal/service/elasticache/global_replication_group_test.go +++ b/internal/service/elasticache/global_replication_group_test.go @@ -542,6 +542,136 @@ func TestAccElastiCacheGlobalReplicationGroup_clusterMode_basic(t *testing.T) { }) } +func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnCreate_NoChange(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var globalReplicationGroup elasticache.GlobalReplicationGroup + var primaryReplicationGroup elasticache.ReplicationGroup + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resourceName := "aws_elasticache_global_replication_group.test" + primaryReplicationGroupResourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); testAccPreCheckGlobalReplicationGroup(t) }, + ErrorCheck: acctest.ErrorCheck(t, elasticache.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalReplicationGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlobalReplicationGroupConfig_numNodeGroups(rName, 2, 2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(resourceName, &globalReplicationGroup), + testAccCheckReplicationGroupExists(primaryReplicationGroupResourceName, &primaryReplicationGroup), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "2"), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", "2"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnCreate_Increase(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var globalReplicationGroup elasticache.GlobalReplicationGroup + var primaryReplicationGroup elasticache.ReplicationGroup + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resourceName := "aws_elasticache_global_replication_group.test" + primaryReplicationGroupResourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); testAccPreCheckGlobalReplicationGroup(t) }, + ErrorCheck: acctest.ErrorCheck(t, elasticache.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalReplicationGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlobalReplicationGroupConfig_numNodeGroups(rName, 2, 3), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(resourceName, &globalReplicationGroup), + testAccCheckReplicationGroupExists(primaryReplicationGroupResourceName, &primaryReplicationGroup), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "3"), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0001$", rName)), + }), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0002$", rName)), + }), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0003$", rName)), + }), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", "3"), + 
resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnCreate_Decrease(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var globalReplicationGroup elasticache.GlobalReplicationGroup + var primaryReplicationGroup elasticache.ReplicationGroup + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resourceName := "aws_elasticache_global_replication_group.test" + primaryReplicationGroupResourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); testAccPreCheckGlobalReplicationGroup(t) }, + ErrorCheck: acctest.ErrorCheck(t, elasticache.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalReplicationGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlobalReplicationGroupConfig_numNodeGroups(rName, 3, 1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(resourceName, &globalReplicationGroup), + testAccCheckReplicationGroupExists(primaryReplicationGroupResourceName, &primaryReplicationGroup), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "1"), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0001$", rName)), + "slots": regexp.MustCompile("^0-16383$"), // all of them + }), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", "1"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "3"), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_NoChange_v6(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -1611,6 +1741,35 @@ resource "aws_elasticache_replication_group" "test" { `, rName) } +func testAccGlobalReplicationGroupConfig_numNodeGroups(rName string, numNodeGroups, globalNumNodeGroups int) string { + return fmt.Sprintf(` +resource "aws_elasticache_global_replication_group" "test" { + global_replication_group_id_suffix = %[1]q + primary_replication_group_id = aws_elasticache_replication_group.test.id + + num_node_groups = %[3]d +} + +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + replication_group_description = "test" + + engine = "redis" + engine_version = "6.2" + node_type = "cache.m5.large" + + parameter_group_name = "default.redis6.x.cluster.on" + automatic_failover_enabled = true + num_node_groups = %[2]d + replicas_per_node_group = 1 + + lifecycle { + ignore_changes = [member_clusters,num_node_groups] + } +} +`, rName, numNodeGroups, globalNumNodeGroups) +} + func testAccGlobalReplicationGroupConfig_engineVersionInherit(rName, primaryReplicationGroupId, repGroupEngineVersion string) string { return fmt.Sprintf(` resource "aws_elasticache_global_replication_group" "test" { diff --git a/website/docs/r/elasticache_global_replication_group.html.markdown b/website/docs/r/elasticache_global_replication_group.html.markdown index 23c728927df..7d8433658d2 100644 --- a/website/docs/r/elasticache_global_replication_group.html.markdown +++ b/website/docs/r/elasticache_global_replication_group.html.markdown @@ -116,6 +116,7 @@ The following arguments are supported: * `global_replication_group_id_suffix` – (Required) The suffix name of a Global Datastore. If `global_replication_group_id_suffix` is changed, creates a new resource. 
* `primary_replication_group_id` – (Required) The ID of the primary cluster that accepts writes and will replicate updates to the secondary cluster. If `primary_replication_group_id` is changed, creates a new resource. * `global_replication_group_description` – (Optional) A user-created description for the global replication group. +* `num_node_groups` - (Optional) The number of node groups (shards) on the global replication group. * `parameter_group_name` - (Optional) An ElastiCache Parameter Group to use for the Global Replication Group. Required when upgrading a major engine version, but will be ignored if left configured after the upgrade is complete. Specifying without a major version upgrade will fail. @@ -137,7 +138,6 @@ In addition to all arguments above, the following attributes are exported: Has the values: * `global_node_group_id` - The ID of the global node group. * `slots` - The keyspace for this node group. -* `num_node_groups` - The number of node groups (shards) on the global replication group. * `transit_encryption_enabled` - A flag that indicates whether the encryption in transit is enabled. ## Import From a558fd853c5aae1f09840c873e2f74076c7ed1a7 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 20 Oct 2022 10:50:56 -0700 Subject: [PATCH 3/9] Adds missing documentation for `aws_elasticache_replication_group` --- website/docs/r/elasticache_replication_group.html.markdown | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index 42f9a48a642..887f6596868 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -198,9 +198,14 @@ The following arguments are optional: * `notification_topic_arn` – (Optional) ARN of an SNS topic to send ElastiCache notifications to. 
Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic` * `number_cache_clusters` - (Optional, **Deprecated** use `num_cache_clusters` instead) Number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications. Conflicts with `num_cache_clusters`, `num_node_groups`, or the deprecated `cluster_mode`. Defaults to `1`. * `num_cache_clusters` - (Optional) Number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications. Conflicts with `num_node_groups`, the deprecated`number_cache_clusters`, or the deprecated `cluster_mode`. Defaults to `1`. +* `num_node_groups` - (Optional) Number of node groups (shards) for this Redis replication group. + Changing this number will trigger a resizing operation before other settings modifications. * `parameter_group_name` - (Optional) Name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. To enable "cluster mode", i.e., data sharding, use a parameter group that has the parameter `cluster-enabled` set to true. * `port` – (Optional) Port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. * `preferred_cache_cluster_azs` - (Optional) List of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is considered. The first item in the list will be the primary node. Ignored when updating. +* `replicas_per_node_group` - (Optional) Number of replica nodes in each node group. + Changing this number will trigger a resizing operation before other settings modifications. + Valid values are 0 to 5. 
* `security_group_ids` - (Optional) One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud * `security_group_names` - (Optional) List of cache security group names to associate with this replication group. * `snapshot_arns` – (Optional) List of ARNs that identify Redis RDB snapshot files stored in Amazon S3. The names object names cannot contain any commas. From 9130ea50c2529948280a3bf9bebdce8dc0eaef09 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 24 Oct 2022 14:16:04 -0700 Subject: [PATCH 4/9] Updates `num_node_groups` to allow updating --- .../elasticache/global_replication_group.go | 57 ++++++- .../global_replication_group_test.go | 154 +++++++++++++++++- 2 files changed, 208 insertions(+), 3 deletions(-) diff --git a/internal/service/elasticache/global_replication_group.go b/internal/service/elasticache/global_replication_group.go index 0c542e43092..e5924858307 100644 --- a/internal/service/elasticache/global_replication_group.go +++ b/internal/service/elasticache/global_replication_group.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "golang.org/x/exp/slices" ) const ( @@ -177,9 +178,10 @@ func ResourceGlobalReplicationGroup() *schema.Resource { }, }, - CustomizeDiff: customdiff.Sequence( + CustomizeDiff: customdiff.All( customizeDiffGlobalReplicationGroupEngineVersionErrorOnDowngrade, customizeDiffGlobalReplicationGroupParamGroupNameRequiresMajorVersionUpgrade, + customdiff.ComputedIf("global_node_groups", diffHasChange("num_node_groups")), ), } } @@ -343,8 +345,8 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc } if v, ok := d.GetOk("num_node_groups"); ok { - requested := v.(int) current := 
len(globalReplicationGroup.GlobalNodeGroups) + requested := v.(int) if requested != current { if requested > current { @@ -488,6 +490,51 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc } } + if d.HasChange("num_node_groups") { + o, n := d.GetChange("num_node_groups") + current := o.(int) + requested := n.(int) + + if requested != current { + if requested > current { + input := &elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + GlobalReplicationGroupId: aws.String(d.Id()), + NodeGroupCount: aws.Int64(int64(requested)), + } + _, err := conn.IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + if err != nil { + return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups: %s", d.Id(), err) + } + } else if requested < current { + var ids []string + for _, v := range d.Get("global_node_groups").(*schema.Set).List() { + v := v.(map[string]any) + ids = append(ids, v["global_node_group_id"].(string)) + } + slices.SortFunc(ids, func(a, b string) bool { + return globalReplicationGroupNodeNumber(a) < globalReplicationGroupNodeNumber(b) + }) + ids = ids[:requested] + + input := &elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + GlobalReplicationGroupId: aws.String(d.Id()), + NodeGroupCount: aws.Int64(int64(requested)), + GlobalNodeGroupsToRemove: aws.StringSlice(ids), + } + _, err := conn.DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + if err != nil { + return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups: %s", d.Id(), err) + } + } + + if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), GlobalReplicationGroupDefaultUpdatedTimeout); err != nil { + return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups: waiting for completion: %s", d.Id(), err) + } + } + } + return resourceGlobalReplicationGroupRead(ctx, d, meta) } @@ 
-645,3 +692,9 @@ func flattenGlobalReplicationGroupPrimaryGroupID(members []*elasticache.GlobalRe } return "" } + +func diffHasChange(key ...string) customdiff.ResourceConditionFunc { + return func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { + return diff.HasChanges(key...) + } +} diff --git a/internal/service/elasticache/global_replication_group_test.go b/internal/service/elasticache/global_replication_group_test.go index f4587c9a038..fa151a926a9 100644 --- a/internal/service/elasticache/global_replication_group_test.go +++ b/internal/service/elasticache/global_replication_group_test.go @@ -672,6 +672,131 @@ func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnCreate_Decrease( }) } +func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnUpdate_Increase(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var globalReplicationGroup elasticache.GlobalReplicationGroup + var primaryReplicationGroup elasticache.ReplicationGroup + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resourceName := "aws_elasticache_global_replication_group.test" + primaryReplicationGroupResourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); testAccPreCheckGlobalReplicationGroup(t) }, + ErrorCheck: acctest.ErrorCheck(t, elasticache.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalReplicationGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlobalReplicationGroupConfig_numNodeGroups_inherit(rName, 2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(resourceName, &globalReplicationGroup), + testAccCheckReplicationGroupExists(primaryReplicationGroupResourceName, &primaryReplicationGroup), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + 
resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "2"), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0001$", rName)), + }), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0002$", rName)), + }), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", "2"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "2"), + ), + }, + { + Config: testAccGlobalReplicationGroupConfig_numNodeGroups(rName, 2, 3), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(resourceName, &globalReplicationGroup), + testAccCheckReplicationGroupExists(primaryReplicationGroupResourceName, &primaryReplicationGroup), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "3"), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0001$", rName)), + }), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0002$", rName)), + }), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0003$", rName)), + }), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", "3"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnUpdate_Decrease(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var globalReplicationGroup elasticache.GlobalReplicationGroup + var primaryReplicationGroup elasticache.ReplicationGroup + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resourceName := "aws_elasticache_global_replication_group.test" + primaryReplicationGroupResourceName := "aws_elasticache_replication_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); testAccPreCheckGlobalReplicationGroup(t) }, + ErrorCheck: acctest.ErrorCheck(t, elasticache.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalReplicationGroupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccGlobalReplicationGroupConfig_numNodeGroups_inherit(rName, 2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(resourceName, &globalReplicationGroup), + testAccCheckReplicationGroupExists(primaryReplicationGroupResourceName, &primaryReplicationGroup), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "2"), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0001$", rName)), + }), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0002$", rName)), + }), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", "2"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "2"), + ), + }, + { + Config: testAccGlobalReplicationGroupConfig_numNodeGroups(rName, 2, 1), + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckGlobalReplicationGroupExists(resourceName, &globalReplicationGroup), + testAccCheckReplicationGroupExists(primaryReplicationGroupResourceName, &primaryReplicationGroup), + resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "1"), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ + // There is no guarantee which node group will be deleted, so we can't check here + "slots": regexp.MustCompile("^0-16383$"), // all slots + }), + resource.TestCheckResourceAttr(resourceName, "num_node_groups", "1"), + resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "2"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_NoChange_v6(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -1741,6 +1866,33 @@ resource "aws_elasticache_replication_group" "test" { `, rName) } +func testAccGlobalReplicationGroupConfig_numNodeGroups_inherit(rName string, numNodeGroups int) string { + return fmt.Sprintf(` +resource "aws_elasticache_global_replication_group" "test" { + global_replication_group_id_suffix = %[1]q + primary_replication_group_id = aws_elasticache_replication_group.test.id +} + +resource "aws_elasticache_replication_group" "test" { + replication_group_id = %[1]q + replication_group_description = "test" + + engine = "redis" + engine_version = "6.2" + node_type = "cache.m5.large" + + parameter_group_name = "default.redis6.x.cluster.on" + automatic_failover_enabled = true + num_node_groups = %[2]d + replicas_per_node_group = 1 + + lifecycle { + ignore_changes = [member_clusters, num_node_groups] + } +} +`, rName, numNodeGroups) +} + func 
testAccGlobalReplicationGroupConfig_numNodeGroups(rName string, numNodeGroups, globalNumNodeGroups int) string { return fmt.Sprintf(` resource "aws_elasticache_global_replication_group" "test" { @@ -1764,7 +1916,7 @@ resource "aws_elasticache_replication_group" "test" { replicas_per_node_group = 1 lifecycle { - ignore_changes = [member_clusters,num_node_groups] + ignore_changes = [member_clusters, num_node_groups] } } `, rName, numNodeGroups, globalNumNodeGroups) From a3241a03b341e0913f6cba0208cbb4cd45e0863c Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Mon, 24 Oct 2022 14:54:58 -0700 Subject: [PATCH 5/9] Cleans up update handling --- .../elasticache/global_replication_group.go | 89 +++++++++---------- .../global_replication_group_test.go | 6 +- 2 files changed, 44 insertions(+), 51 deletions(-) diff --git a/internal/service/elasticache/global_replication_group.go b/internal/service/elasticache/global_replication_group.go index e5924858307..767559940ef 100644 --- a/internal/service/elasticache/global_replication_group.go +++ b/internal/service/elasticache/global_replication_group.go @@ -6,7 +6,6 @@ import ( "fmt" "log" "regexp" - "sort" "strconv" "time" @@ -350,12 +349,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc if requested != current { if requested > current { - input := &elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - GlobalReplicationGroupId: aws.String(d.Id()), - NodeGroupCount: aws.Int64(int64(requested)), - } - _, err := conn.IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + err := globalReplcationGroupNodeGroupIncrease(ctx, conn, d.Id(), requested) if err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups on creation: %s", d.Id(), err) } @@ -364,18 +358,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc for _, v := range globalReplicationGroup.GlobalNodeGroups { ids = 
append(ids, aws.StringValue(v.GlobalNodeGroupId)) } - sort.Slice(ids, func(i, j int) bool { - return globalReplicationGroupNodeNumber(ids[i]) < globalReplicationGroupNodeNumber(ids[j]) - }) - ids = ids[:requested] - - input := &elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - GlobalReplicationGroupId: aws.String(d.Id()), - NodeGroupCount: aws.Int64(int64(requested)), - GlobalNodeGroupsToRetain: aws.StringSlice(ids), - } - _, err := conn.DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + err := globalReplicationGroupNodeGroupDecrease(ctx, conn, d.Id(), requested, ids) if err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups on creation: %s", d.Id(), err) } @@ -390,17 +373,6 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc return resourceGlobalReplicationGroupRead(ctx, d, meta) } -func globalReplicationGroupNodeNumber(id string) int { - re := regexp.MustCompile(`^.+-0{0-3}(\d+)$`) - matches := re.FindStringSubmatch(id) - if len(matches) == 2 { - if v, err := strconv.Atoi(matches[1]); err == nil { - return v - } - } - return 0 -} - func resourceGlobalReplicationGroupRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { conn := meta.(*conns.AWSClient).ElastiCacheConn @@ -497,12 +469,7 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc if requested != current { if requested > current { - input := &elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - GlobalReplicationGroupId: aws.String(d.Id()), - NodeGroupCount: aws.Int64(int64(requested)), - } - _, err := conn.IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + err := globalReplcationGroupNodeGroupIncrease(ctx, conn, d.Id(), requested) if err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups: %s", d.Id(), err) } @@ 
-512,18 +479,7 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc v := v.(map[string]any) ids = append(ids, v["global_node_group_id"].(string)) } - slices.SortFunc(ids, func(a, b string) bool { - return globalReplicationGroupNodeNumber(a) < globalReplicationGroupNodeNumber(b) - }) - ids = ids[:requested] - - input := &elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{ - ApplyImmediately: aws.Bool(true), - GlobalReplicationGroupId: aws.String(d.Id()), - NodeGroupCount: aws.Int64(int64(requested)), - GlobalNodeGroupsToRemove: aws.StringSlice(ids), - } - _, err := conn.DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + err := globalReplicationGroupNodeGroupDecrease(ctx, conn, d.Id(), requested, ids) if err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups: %s", d.Id(), err) } @@ -693,6 +649,43 @@ func flattenGlobalReplicationGroupPrimaryGroupID(members []*elasticache.GlobalRe return "" } +func globalReplcationGroupNodeGroupIncrease(ctx context.Context, conn *elasticache.ElastiCache, id string, requested int) error { + input := &elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + GlobalReplicationGroupId: aws.String(id), + NodeGroupCount: aws.Int64(int64(requested)), + } + _, err := conn.IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + return err +} + +func globalReplicationGroupNodeGroupDecrease(ctx context.Context, conn *elasticache.ElastiCache, id string, requested int, nodeGroupIDs []string) error { + slices.SortFunc(nodeGroupIDs, func(a, b string) bool { + return globalReplicationGroupNodeNumber(a) < globalReplicationGroupNodeNumber(b) + }) + nodeGroupIDs = nodeGroupIDs[:requested] + + input := &elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{ + ApplyImmediately: aws.Bool(true), + GlobalReplicationGroupId: aws.String(id), + NodeGroupCount: aws.Int64(int64(requested)), + 
GlobalNodeGroupsToRetain: aws.StringSlice(nodeGroupIDs), + } + _, err := conn.DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + return err +} + +func globalReplicationGroupNodeNumber(id string) int { + re := regexp.MustCompile(`^.+-0{0,3}(\d+)$`) + matches := re.FindStringSubmatch(id) + if len(matches) == 2 { + if v, err := strconv.Atoi(matches[1]); err == nil { + return v + } + } + return 0 +} + func diffHasChange(key ...string) customdiff.ResourceConditionFunc { return func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { return diff.HasChanges(key...) diff --git a/internal/service/elasticache/global_replication_group_test.go b/internal/service/elasticache/global_replication_group_test.go index fa151a926a9..f7a224326e5 100644 --- a/internal/service/elasticache/global_replication_group_test.go +++ b/internal/service/elasticache/global_replication_group_test.go @@ -657,7 +657,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnCreate_Decrease( resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "1"), resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0001$", rName)), - "slots": regexp.MustCompile("^0-16383$"), // all of them + "slots": regexp.MustCompile("^0-16383$"), // all slots }), resource.TestCheckResourceAttr(resourceName, "num_node_groups", "1"), resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "3"), @@ -781,8 +781,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnUpdate_Decrease( resource.TestCheckResourceAttr(resourceName, "cluster_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "global_node_groups.#", "1"), resource.TestMatchTypeSetElemNestedAttrs(resourceName, "global_node_groups.*", map[string]*regexp.Regexp{ - // There is no guarantee which node group will be deleted, so we can't check 
here - "slots": regexp.MustCompile("^0-16383$"), // all slots + "global_node_group_id": regexp.MustCompile(fmt.Sprintf("^[a-z]+-%s-0001$", rName)), + "slots": regexp.MustCompile("^0-16383$"), // all slots }), resource.TestCheckResourceAttr(resourceName, "num_node_groups", "1"), resource.TestCheckResourceAttr(primaryReplicationGroupResourceName, "num_node_groups", "2"), From d7df204db76d0df40afb727e078017c60ce94b08 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 25 Oct 2022 14:15:59 -0700 Subject: [PATCH 6/9] Adds configurable timeouts --- .../elasticache/global_replication_group.go | 40 +++++++++++-------- .../service/elasticache/replication_group.go | 2 +- internal/service/elasticache/wait.go | 12 +++--- ...che_global_replication_group.html.markdown | 8 ++++ 4 files changed, 38 insertions(+), 24 deletions(-) diff --git a/internal/service/elasticache/global_replication_group.go b/internal/service/elasticache/global_replication_group.go index 767559940ef..277da42e253 100644 --- a/internal/service/elasticache/global_replication_group.go +++ b/internal/service/elasticache/global_replication_group.go @@ -177,6 +177,12 @@ func ResourceGlobalReplicationGroup() *schema.Resource { }, }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(globalReplicationGroupDefaultCreatedTimeout), + Update: schema.DefaultTimeout(globalReplicationGroupDefaultUpdatedTimeout), + Delete: schema.DefaultTimeout(globalReplicationGroupDefaultDeletedTimeout), + }, + CustomizeDiff: customdiff.All( customizeDiffGlobalReplicationGroupEngineVersionErrorOnDowngrade, customizeDiffGlobalReplicationGroupParamGroupNameRequiresMajorVersionUpgrade, @@ -284,7 +290,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc d.SetId(aws.StringValue(output.GlobalReplicationGroup.GlobalReplicationGroupId)) - globalReplicationGroup, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), GlobalReplicationGroupDefaultCreatedTimeout) + 
globalReplicationGroup, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.Errorf("waiting for ElastiCache Global Replication Group (%s) creation: %s", d.Id(), err) } @@ -293,7 +299,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc if v := v.(bool); v == flattenGlobalReplicationGroupAutomaticFailoverEnabled(globalReplicationGroup.Members) { log.Printf("[DEBUG] Not updating ElastiCache Global Replication Group (%s) automatic failover: no change from %t", d.Id(), v) } else { - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationAutomaticFailoverUpdater(v)); err != nil { + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationAutomaticFailoverUpdater(v), d.Timeout(schema.TimeoutCreate)); err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) automatic failover on creation: %s", d.Id(), err) } } @@ -303,7 +309,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc if v.(string) == aws.StringValue(globalReplicationGroup.CacheNodeType) { log.Printf("[DEBUG] Not updating ElastiCache Global Replication Group (%s) node type: no change from %q", d.Id(), v) } else { - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(v.(string))); err != nil { + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(v.(string)), d.Timeout(schema.TimeoutCreate)); err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) node type on creation: %s", d.Id(), err) } } @@ -326,7 +332,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc p := d.Get("parameter_group_name").(string) if diff[0] == 1 { - err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(v.(string), p)) + err := 
updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(v.(string), p), d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) engine version on creation: %s", d.Id(), err) } @@ -335,7 +341,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc return diag.Errorf("cannot change parameter group name on minor engine version upgrade, upgrading from %s to %s", engineVersion.String(), requestedVersion.String()) } if t, _ := regexp.MatchString(`[6-9]\.x`, v.(string)); !t { - err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(v.(string))) + err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(v.(string)), d.Timeout(schema.TimeoutCreate)) if err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) engine version on creation: %s", d.Id(), err) } @@ -364,7 +370,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc } } - if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), GlobalReplicationGroupDefaultUpdatedTimeout); err != nil { + if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups on creation: waiting for completion: %s", d.Id(), err) } } @@ -424,13 +430,13 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc // Only one field can be changed per request if d.HasChange("cache_node_type") { - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(d.Get("cache_node_type").(string))); err != nil { + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(d.Get("cache_node_type").(string)), 
d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) node type: %s", d.Id(), err) } } if d.HasChange("automatic_failover_enabled") { - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationAutomaticFailoverUpdater(d.Get("automatic_failover_enabled").(bool))); err != nil { + if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationAutomaticFailoverUpdater(d.Get("automatic_failover_enabled").(bool)), d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) automatic failover: %s", d.Id(), err) } } @@ -444,12 +450,12 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc diff := diffVersion(newVersion, oldVersion) if diff[0] == 1 { p := d.Get("parameter_group_name").(string) - err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(n.(string), p)) + err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMajorUpdater(n.(string), p), d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s): %s", d.Id(), err) } } else if diff[1] == 1 { - err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(n.(string))) + err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupEngineVersionMinorUpdater(n.(string)), d.Timeout(schema.TimeoutUpdate)) if err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s): %s", d.Id(), err) } @@ -457,7 +463,7 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc } if d.HasChange("global_replication_group_description") { - if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupDescriptionUpdater(d.Get("global_replication_group_description").(string))); err != nil { + if 
err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupDescriptionUpdater(d.Get("global_replication_group_description").(string)), d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) description: %s", d.Id(), err) } } @@ -485,7 +491,7 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc } } - if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), GlobalReplicationGroupDefaultUpdatedTimeout); err != nil { + if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return diag.Errorf("updating ElastiCache Global Replication Group (%s) node groups: waiting for completion: %s", d.Id(), err) } } @@ -525,7 +531,7 @@ func globalReplicationGroupNodeTypeUpdater(nodeType string) globalReplicationGro } } -func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, f globalReplicationGroupUpdater) error { +func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, f globalReplicationGroupUpdater, timeout time.Duration) error { input := &elasticache.ModifyGlobalReplicationGroupInput{ ApplyImmediately: aws.Bool(true), GlobalReplicationGroupId: aws.String(id), @@ -536,7 +542,7 @@ func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiC return err } - if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, id, GlobalReplicationGroupDefaultUpdatedTimeout); err != nil { + if _, err := waitGlobalReplicationGroupAvailable(ctx, conn, id, timeout); err != nil { return fmt.Errorf("waiting for completion: %w", err) } @@ -547,7 +553,7 @@ func resourceGlobalReplicationGroupDelete(ctx context.Context, d *schema.Resourc conn := meta.(*conns.AWSClient).ElastiCacheConn // Using Update timeout because the Global Replication Group could be in the middle of an update operation - err := 
deleteGlobalReplicationGroup(ctx, conn, d.Id(), GlobalReplicationGroupDefaultUpdatedTimeout) + err := deleteGlobalReplicationGroup(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), d.Timeout(schema.TimeoutDelete)) if err != nil { return diag.Errorf("deleting ElastiCache Global Replication Group: %s", err) } @@ -555,7 +561,7 @@ func resourceGlobalReplicationGroupDelete(ctx context.Context, d *schema.Resourc return nil } -func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, readyTimeout time.Duration) error { +func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, readyTimeout, deleteTimeout time.Duration) error { input := &elasticache.DeleteGlobalReplicationGroupInput{ GlobalReplicationGroupId: aws.String(id), RetainPrimaryReplicationGroup: aws.Bool(true), @@ -588,7 +594,7 @@ func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiC return err } - if _, err := WaitGlobalReplicationGroupDeleted(ctx, conn, id); err != nil { + if _, err := waitGlobalReplicationGroupDeleted(ctx, conn, id, deleteTimeout); err != nil { return fmt.Errorf("waiting for completion: %w", err) } diff --git a/internal/service/elasticache/replication_group.go b/internal/service/elasticache/replication_group.go index 134768a2ef1..f9f46c930ae 100644 --- a/internal/service/elasticache/replication_group.go +++ b/internal/service/elasticache/replication_group.go @@ -588,7 +588,7 @@ func resourceReplicationGroupCreate(d *schema.ResourceData, meta interface{}) er // state, but the global replication group can still be in the "modifying" state. Wait for the replication group // to be fully added to the global replication group. // API calls to the global replication group can be made in any region. 
- if _, err := waitGlobalReplicationGroupAvailable(context.TODO(), conn, v.(string), GlobalReplicationGroupDefaultCreatedTimeout); err != nil { + if _, err := waitGlobalReplicationGroupAvailable(context.TODO(), conn, v.(string), globalReplicationGroupDefaultCreatedTimeout); err != nil { return fmt.Errorf("error waiting for ElastiCache Global Replication Group (%s) to be available: %w", v, err) } } diff --git a/internal/service/elasticache/wait.go b/internal/service/elasticache/wait.go index ba7c55069d8..46423bb5151 100644 --- a/internal/service/elasticache/wait.go +++ b/internal/service/elasticache/wait.go @@ -151,9 +151,9 @@ func WaitCacheClusterDeleted(conn *elasticache.ElastiCache, cacheClusterID strin } const ( - GlobalReplicationGroupDefaultCreatedTimeout = 25 * time.Minute - GlobalReplicationGroupDefaultUpdatedTimeout = 40 * time.Minute - GlobalReplicationGroupDefaultDeletedTimeout = 20 * time.Minute + globalReplicationGroupDefaultCreatedTimeout = 60 * time.Minute + globalReplicationGroupDefaultUpdatedTimeout = 60 * time.Minute + globalReplicationGroupDefaultDeletedTimeout = 20 * time.Minute globalReplicationGroupAvailableMinTimeout = 10 * time.Second globalReplicationGroupAvailableDelay = 30 * time.Second @@ -181,8 +181,8 @@ func waitGlobalReplicationGroupAvailable(ctx context.Context, conn *elasticache. 
return nil, err } -// WaitGlobalReplicationGroupDeleted waits for a Global Replication Group to be deleted -func WaitGlobalReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string) (*elasticache.GlobalReplicationGroup, error) { +// waitGlobalReplicationGroupDeleted waits for a Global Replication Group to be deleted +func waitGlobalReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroup, error) { stateConf := &resource.StateChangeConf{ Pending: []string{ GlobalReplicationGroupStatusAvailable, @@ -192,7 +192,7 @@ func WaitGlobalReplicationGroupDeleted(ctx context.Context, conn *elasticache.El }, Target: []string{}, Refresh: statusGlobalReplicationGroup(ctx, conn, globalReplicationGroupID), - Timeout: GlobalReplicationGroupDefaultDeletedTimeout, + Timeout: timeout, MinTimeout: globalReplicationGroupDeletedMinTimeout, Delay: globalReplicationGroupDeletedDelay, } diff --git a/website/docs/r/elasticache_global_replication_group.html.markdown b/website/docs/r/elasticache_global_replication_group.html.markdown index 7d8433658d2..040787547b2 100644 --- a/website/docs/r/elasticache_global_replication_group.html.markdown +++ b/website/docs/r/elasticache_global_replication_group.html.markdown @@ -140,6 +140,14 @@ In addition to all arguments above, the following attributes are exported: * `slots` - The keyspace for this node group. * `transit_encryption_enabled` - A flag that indicates whether the encryption in transit is enabled. 
+## Timeouts

+

+[Configuration options](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts):

+

+* `create` - (Default `60m`)

+* `update` - (Default `60m`)

+* `delete` - (Default `20m`)

+

 ## Import

 ElastiCache Global Replication Groups can be imported using the `global_replication_group_id`, e.g.,

From ad4419a91c34bc07d2625c01a4d687ede71b589e Mon Sep 17 00:00:00 2001
From: Graham Davison
Date: Wed, 26 Oct 2022 17:40:17 -0700
Subject: [PATCH 7/9] Adds CHANGELOG entry

---
 .changelog/27500.txt | 7 +++++++
 1 file changed, 7 insertions(+)
 create mode 100644 .changelog/27500.txt

diff --git a/.changelog/27500.txt b/.changelog/27500.txt
new file mode 100644
index 00000000000..0883ab56d1d
--- /dev/null
+++ b/.changelog/27500.txt
@@ -0,0 +1,7 @@
+```release-note:enhancement
+resource/aws_elasticache_global_replication_group: Add `num_node_groups` argument and read-only `global_node_groups` attribute
+```
+
+```release-note:enhancement
+resource/aws_elasticache_global_replication_group: Add configurable timeouts
+``` From 3d46d0ffd6e150c4bdd1d5f3d3a41119957167ad Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 27 Oct 2022 11:43:02 -0700 Subject: [PATCH 8/9] Fixes sweeper compilation --- internal/service/elasticache/sweep.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/elasticache/sweep.go b/internal/service/elasticache/sweep.go index 240b09014c4..c0c22647423 100644 --- a/internal/service/elasticache/sweep.go +++ b/internal/service/elasticache/sweep.go @@ -152,7 +152,7 @@ func sweepGlobalReplicationGroups(region string) error { } log.Printf("[INFO] Deleting ElastiCache Global Replication Group: %s", id) - err := deleteGlobalReplicationGroup(ctx, conn, id, sweeperGlobalReplicationGroupDefaultUpdatedTimeout) + err := deleteGlobalReplicationGroup(ctx, conn, id, sweeperGlobalReplicationGroupDefaultUpdatedTimeout, globalReplicationGroupDefaultDeletedTimeout) if err != nil { sweeperErr := fmt.Errorf("error deleting ElastiCache Global Replication Group (%s): %w", id, err) log.Printf("[ERROR] %s", sweeperErr) From 4d965f6d521183e30e779dddbf3fa4c842dc0b3b Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Thu, 27 Oct 2022 11:43:15 -0700 Subject: [PATCH 9/9] Cleans up errors --- internal/service/elasticache/sweep.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/internal/service/elasticache/sweep.go b/internal/service/elasticache/sweep.go index c0c22647423..72074b559d2 100644 --- a/internal/service/elasticache/sweep.go +++ b/internal/service/elasticache/sweep.go @@ -146,17 +146,13 @@ func sweepGlobalReplicationGroups(region string) error { disassociationErrors := DisassociateMembers(conn, globalReplicationGroup) if disassociationErrors != nil { - sweeperErr := fmt.Errorf("failed to disassociate ElastiCache Global Replication Group (%s) members: %w", id, disassociationErrors) - log.Printf("[ERROR] %s", sweeperErr) - return sweeperErr + return fmt.Errorf("disassociating ElastiCache Global Replication Group 
(%s) members: %w", id, disassociationErrors) } log.Printf("[INFO] Deleting ElastiCache Global Replication Group: %s", id) err := deleteGlobalReplicationGroup(ctx, conn, id, sweeperGlobalReplicationGroupDefaultUpdatedTimeout, globalReplicationGroupDefaultDeletedTimeout) if err != nil { - sweeperErr := fmt.Errorf("error deleting ElastiCache Global Replication Group (%s): %w", id, err) - log.Printf("[ERROR] %s", sweeperErr) - return sweeperErr + return fmt.Errorf("deleting ElastiCache Global Replication Group (%s): %w", id, err) } return nil }) @@ -173,7 +169,7 @@ func sweepGlobalReplicationGroups(region string) error { } if err != nil { - grgErrs = multierror.Append(grgErrs, fmt.Errorf("error listing ElastiCache Global Replication Groups: %w", err)) + grgErrs = multierror.Append(grgErrs, fmt.Errorf("listing ElastiCache Global Replication Groups: %w", err)) } return grgErrs.ErrorOrNil()