Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

resource/aws_elasticache_replication_group: Validation fixes #38396

Merged
merged 6 commits into from
Jul 17, 2024
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions .changelog/38396.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
```release-note:bug
resource/aws_elasticache_replication_group: Requires `description`.
```

```release-note:bug
resource/aws_elasticache_replication_group: `num_cache_clusters` must be at least 2 when `automatic_failover_enabled` is `true`.
```

```release-note:bug
resource/aws_elasticache_replication_group: When `num_cache_clusters` is set, prevents setting `replicas_per_node_group`.
```

```release-note:bug
resource/aws_elasticache_replication_group: Allows setting `replicas_per_node_group` to `0` and sets the maximum to `5`.
```
20 changes: 18 additions & 2 deletions internal/service/elasticache/diff.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
"errors"

awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types"
"github.com/hashicorp/go-cty/cty"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-provider-aws/names"
)
Expand Down Expand Up @@ -58,8 +59,8 @@
return errors.New(`engine "memcached" does not support final_snapshot_identifier`)
}

// customizeDiffValidateReplicationGroupAutomaticFailover validates that `automatic_failover_enabled` is set when `multi_az_enabled` is true
func customizeDiffValidateReplicationGroupAutomaticFailover(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
// customizeDiffValidateReplicationGroupMultiAZAutomaticFailover validates that `automatic_failover_enabled` is set when `multi_az_enabled` is true
func customizeDiffValidateReplicationGroupMultiAZAutomaticFailover(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
if v := diff.Get("multi_az_enabled").(bool); !v {
return nil
}
Expand All @@ -68,3 +69,18 @@
}
return nil
}

// customizeDiffValidateReplicationGroupAutomaticFailoverNumCacheClusters validates that `automatic_failover_enabled` is set when `multi_az_enabled` is true
func customizeDiffValidateReplicationGroupAutomaticFailoverNumCacheClusters(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
if v := diff.Get("automatic_failover_enabled").(bool); !v {
return nil
}
raw := diff.GetRawConfig().GetAttr("num_cache_clusters")
if !raw.IsKnown() || raw.IsNull() {
return nil
}
if raw.GreaterThanOrEqualTo(cty.NumberIntVal(2)).True() {

Check failure on line 82 in internal/service/elasticache/diff.go

View workflow job for this annotation

GitHub Actions / 2 of 2

Magic number: 2, in <argument> detected (mnd)
return nil
}
return errors.New(`"num_cache_clusters": must be at least 2 if automatic_failover_enabled is true`)
}
39 changes: 29 additions & 10 deletions internal/service/elasticache/replication_group.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/elasticache"
awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types"
"github.com/hashicorp/go-cty/cty"
"github.com/hashicorp/go-cty/cty/gocty"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
Expand Down Expand Up @@ -111,8 +113,7 @@ func resourceReplicationGroup() *schema.Resource {
},
names.AttrDescription: {
Type: schema.TypeString,
Optional: true,
Computed: true,
Required: true,
ValidateFunc: validation.StringIsNotEmpty,
},
names.AttrEngine: {
Expand Down Expand Up @@ -235,7 +236,7 @@ func resourceReplicationGroup() *schema.Resource {
Type: schema.TypeInt,
Computed: true,
Optional: true,
ConflictsWith: []string{"num_node_groups"},
ConflictsWith: []string{"num_node_groups", "replicas_per_node_group"},
},
"num_node_groups": {
Type: schema.TypeInt,
Expand Down Expand Up @@ -277,9 +278,11 @@ func resourceReplicationGroup() *schema.Resource {
Computed: true,
},
"replicas_per_node_group": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
Type: schema.TypeInt,
Optional: true,
Computed: true,
ConflictsWith: []string{"num_cache_clusters"},
ValidateFunc: validation.IntBetween(0, 5),
},
"replication_group_id": {
Type: schema.TypeString,
Expand Down Expand Up @@ -385,8 +388,8 @@ func resourceReplicationGroup() *schema.Resource {
Delete: schema.DefaultTimeout(45 * time.Minute),
},

CustomizeDiff: customdiff.Sequence(
customizeDiffValidateReplicationGroupAutomaticFailover,
CustomizeDiff: customdiff.All(
customizeDiffValidateReplicationGroupMultiAZAutomaticFailover,
customizeDiffEngineVersionForceNewOnDowngrade,
customdiff.ComputedIf("member_clusters", func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool {
return diff.HasChange("num_cache_clusters") ||
Expand All @@ -398,6 +401,7 @@ func resourceReplicationGroup() *schema.Resource {
// be configured during creation of the cluster.
return semver.LessThan(d.Get("engine_version_actual").(string), "7.0.5")
}),
customizeDiffValidateReplicationGroupAutomaticFailoverNumCacheClusters,
verify.SetTagsDiff,
),
}
Expand Down Expand Up @@ -513,8 +517,23 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData,
input.PreferredCacheClusterAZs = flex.ExpandStringValueList(v.([]interface{}))
}

if v, ok := d.GetOk("replicas_per_node_group"); ok {
input.ReplicasPerNodeGroup = aws.Int32(int32(v.(int)))
rawConfig := d.GetRawConfig()
rawReplicasPerNodeGroup := rawConfig.GetAttr("replicas_per_node_group")
if rawReplicasPerNodeGroup.IsKnown() && !rawReplicasPerNodeGroup.IsNull() {
var v int32
err := gocty.FromCtyValue(rawReplicasPerNodeGroup, &v)
if err != nil {
path := cty.GetAttrPath("replicas_per_node_group")
diags = append(diags, errs.NewAttributeErrorDiagnostic(
path,
"Invalid Value",
"An unexpected error occurred while reading configuration values. "+
"This is always an error in the provider. "+
"Please report the following to the provider developer:\n\n"+
ewbankkit marked this conversation as resolved.
Show resolved Hide resolved
fmt.Sprintf(`Reading "%s": %s`, errs.PathString(path), err),
))
}
input.ReplicasPerNodeGroup = aws.Int32(v)
}

if v, ok := d.GetOk("subnet_group_name"); ok {
Expand Down
71 changes: 67 additions & 4 deletions internal/service/elasticache/replication_group_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1370,6 +1370,28 @@ func TestAccElastiCacheReplicationGroup_ClusterMode_updateFromDisabled_Compatibl
})
}

// TestAccElastiCacheReplicationGroup_cacheClustersConflictsWithReplicasPerNodeGroup
// verifies that configuring both `num_cache_clusters` and `replicas_per_node_group`
// on the same replication group is rejected with a ConflictsWith error at plan time.
func TestAccElastiCacheReplicationGroup_cacheClustersConflictsWithReplicasPerNodeGroup(t *testing.T) {
	ctx := acctest.Context(t)
	if testing.Short() {
		t.Skip("skipping long-running test in short mode")
	}

	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.ElastiCacheServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckReplicationGroupDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccReplicationGroupConfig_cacheClustersConflictsWithReplicasPerNodeGroup(rName),
				// The apply must fail before creating anything; only the conflict error is expected.
				ExpectError: regexache.MustCompile(`"replicas_per_node_group": conflicts with num_cache_clusters`),
			},
		},
	})
}

func TestAccElastiCacheReplicationGroup_clusteringAndCacheNodesCausesError(t *testing.T) {
ctx := acctest.Context(t)
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
Expand Down Expand Up @@ -1696,7 +1718,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClusters_basic(t *testing.T)
})
}

func TestAccElastiCacheReplicationGroup_NumberCacheClustersFailover_autoFailoverDisabled(t *testing.T) {
func TestAccElastiCacheReplicationGroup_NumberCacheClusters_autoFailoverDisabled(t *testing.T) {
ctx := acctest.Context(t)
if testing.Short() {
t.Skip("skipping long-running test in short mode")
Expand Down Expand Up @@ -1754,7 +1776,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersFailover_autoFailover
})
}

func TestAccElastiCacheReplicationGroup_NumberCacheClustersFailover_autoFailoverEnabled(t *testing.T) {
func TestAccElastiCacheReplicationGroup_NumberCacheClusters_autoFailoverEnabled(t *testing.T) {
ctx := acctest.Context(t)
if testing.Short() {
t.Skip("skipping long-running test in short mode")
Expand Down Expand Up @@ -1817,6 +1839,33 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersFailover_autoFailover
})
}

// TestAccElastiCacheReplicationGroup_autoFailoverEnabled_validateNumberCacheClusters
// verifies the CustomizeDiff validation that `num_cache_clusters` must be at least 2
// when `automatic_failover_enabled` is true: a config with 1 cache cluster and
// failover enabled must fail at plan time with the expected error message.
func TestAccElastiCacheReplicationGroup_autoFailoverEnabled_validateNumberCacheClusters(t *testing.T) {
	ctx := acctest.Context(t)
	if testing.Short() {
		t.Skip("skipping long-running test in short mode")
	}

	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)

	// Named for readability at the call site: failover on, Multi-AZ off, so the
	// failure comes from the num_cache_clusters check, not the Multi-AZ check.
	const (
		autoFailoverEnabled = true
		multiAZDisabled     = false
	)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.PreCheck(ctx, t) },
		ErrorCheck:               acctest.ErrorCheck(t, names.ElastiCacheServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckReplicationGroupDestroy(ctx),
		Steps: []resource.TestStep{
			{
				// num_cache_clusters = 1 with automatic failover enabled is invalid.
				Config:      testAccReplicationGroupConfig_failoverMultiAZ(rName, 1, autoFailoverEnabled, multiAZDisabled),
				ExpectError: regexache.MustCompile(`"num_cache_clusters": must be at least 2 if automatic_failover_enabled is true`),
			},
		},
	})
}

func TestAccElastiCacheReplicationGroup_NumberCacheClusters_multiAZEnabled(t *testing.T) {
ctx := acctest.Context(t)
if testing.Short() {
Expand Down Expand Up @@ -3075,6 +3124,20 @@ resource "aws_elasticache_replication_group" "test" {
`, rName)
}

// testAccReplicationGroupConfig_cacheClustersConflictsWithReplicasPerNodeGroup returns
// a deliberately invalid configuration that sets both `num_cache_clusters` and
// `replicas_per_node_group`, which are mutually exclusive (ConflictsWith).
func testAccReplicationGroupConfig_cacheClustersConflictsWithReplicasPerNodeGroup(rName string) string {
	return fmt.Sprintf(`
resource "aws_elasticache_replication_group" "test" {
  replication_group_id = %[1]q
  description          = "test description"
  node_type            = "cache.t3.small"

  automatic_failover_enabled = true
  num_cache_clusters         = 2
  replicas_per_node_group    = 0
}
`, rName)
}

func testAccReplicationGroupConfig_v5(rName string) string {
return fmt.Sprintf(`
resource "aws_elasticache_replication_group" "test" {
Expand Down Expand Up @@ -3929,7 +3992,7 @@ resource "aws_elasticache_replication_group" "test" {
node_type = "cache.t2.micro"
num_cache_clusters = %[2]d
replication_group_id = %[1]q
description = "Terraform Acceptance Testing - num_cache_clusters"
description = "test description"
subnet_group_name = aws_elasticache_subnet_group.test.name

tags = {
Expand All @@ -3956,7 +4019,7 @@ resource "aws_elasticache_replication_group" "test" {
node_type = "cache.t3.medium"
num_cache_clusters = %[2]d
replication_group_id = %[1]q
description = "Terraform Acceptance Testing - num_cache_clusters"
description = "test description"
subnet_group_name = aws_elasticache_subnet_group.test.name
}

Expand Down
13 changes: 11 additions & 2 deletions website/docs/r/elasticache_replication_group.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -216,19 +216,28 @@ The following arguments are optional:
* `kms_key_id` - (Optional) The ARN of the key that you wish to use if encrypting at rest. If not supplied, uses service managed encryption. Can be specified only if `at_rest_encryption_enabled = true`.
* `log_delivery_configuration` - (Optional, Redis only) Specifies the destination and format of Redis [SLOWLOG](https://redis.io/commands/slowlog) or Redis [Engine Log](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log). See the documentation on [Amazon ElastiCache](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Log_Delivery.html#Log_contents-engine-log). See [Log Delivery Configuration](#log-delivery-configuration) below for more details.
* `maintenance_window` – (Optional) Specifies the weekly time range for when maintenance on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC). The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
* `multi_az_enabled` - (Optional) Specifies whether to enable Multi-AZ Support for the replication group. If `true`, `automatic_failover_enabled` must also be enabled. Defaults to `false`.
* `multi_az_enabled` - (Optional) Specifies whether to enable Multi-AZ Support for the replication group.
If `true`, `automatic_failover_enabled` must also be enabled.
Defaults to `false`.
* `network_type` - (Optional) The IP versions for cache cluster connections. Valid values are `ipv4`, `ipv6` or `dual_stack`.
* `node_type` - (Optional) Instance class to be used. See AWS documentation for information on [supported node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html) and [guidance on selecting node types](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/nodes-select-size.html). Required unless `global_replication_group_id` is set. Cannot be set if `global_replication_group_id` is set.
* `notification_topic_arn` – (Optional) ARN of an SNS topic to send ElastiCache notifications to. Example: `arn:aws:sns:us-east-1:012345678999:my_sns_topic`
* `num_cache_clusters` - (Optional) Number of cache clusters (primary and replicas) this replication group will have. If Multi-AZ is enabled, the value of this parameter must be at least 2. Updates will occur before other modifications. Conflicts with `num_node_groups`. Defaults to `1`.
* `num_cache_clusters` - (Optional) Number of cache clusters (primary and replicas) this replication group will have.
If `automatic_failover_enabled` or `multi_az_enabled` are `true`, must be at least 2.
Updates will occur before other modifications.
Conflicts with `num_node_groups` and `replicas_per_node_group`.
Defaults to `1`.
* `num_node_groups` - (Optional) Number of node groups (shards) for this Redis replication group.
Changing this number will trigger a resizing operation before other settings modifications.
Conflicts with `num_cache_clusters`.
* `parameter_group_name` - (Optional) Name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. To enable "cluster mode", i.e., data sharding, use a parameter group that has the parameter `cluster-enabled` set to true.
* `port` – (Optional) Port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379.
* `preferred_cache_cluster_azs` - (Optional) List of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is considered. The first item in the list will be the primary node. Ignored when updating.
* `replicas_per_node_group` - (Optional) Number of replica nodes in each node group.
Changing this number will trigger a resizing operation before other settings modifications.
Valid values are 0 to 5.
Conflicts with `num_cache_clusters`.
Can only be set if `num_node_groups` is set.
* `security_group_ids` - (Optional) IDs of one or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud.
* `security_group_names` - (Optional) Names of one or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud.
* `snapshot_arns` – (Optional) List of ARNs that identify Redis RDB snapshot files stored in Amazon S3. The names object names cannot contain any commas.
Expand Down
Loading