From 3cc1a09d981dd6f2d21ca9ea1f8136a7238ab955 Mon Sep 17 00:00:00 2001 From: arush sharma Date: Thu, 6 Mar 2025 17:59:54 -0800 Subject: [PATCH 1/5] add replica update to crd --- apis/v1alpha1/ack-generate-metadata.yaml | 10 +- apis/v1alpha1/generator.yaml | 11 +- apis/v1alpha1/table.go | 6 +- apis/v1alpha1/zz_generated.deepcopy.go | 15 +- .../dynamodb.services.k8s.aws_tables.yaml | 42 +- generator.yaml | 11 +- .../dynamodb.services.k8s.aws_tables.yaml | 42 +- pkg/resource/table/hooks.go | 33 +- pkg/resource/table/hooks_replica_updates.go | 416 ++++++++++++++++++ pkg/resource/table/sdk.go | 132 +++--- .../table/sdk_create_post_set_output.go.tpl | 9 + .../table/sdk_delete_pre_build_request.go.tpl | 23 +- .../table/sdk_read_one_post_set_output.go.tpl | 39 ++ 13 files changed, 712 insertions(+), 77 deletions(-) create mode 100644 pkg/resource/table/hooks_replica_updates.go diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index 13f2367..72b9382 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2025-02-20T18:05:49Z" - build_hash: a326346bd3a6973254d247c9ab2dc76790c36241 + build_date: "2025-03-12T23:25:20Z" + build_hash: 5645e51ed3c413f616e1b929f1527a5139c44198 go_version: go1.24.0 - version: v0.43.2 -api_directory_checksum: 3bc0637159c94a74d2402d9a707f9c21339e9b45 + version: v0.43.2-3-g5645e51 +api_directory_checksum: 0d5b95bdbe63c6cfc495149b7a86440c4a5fb33a api_version: v1alpha1 aws_sdk_go_version: v1.32.6 generator_config_info: - file_checksum: f6d68afa724d9e1d8fb6ce58da11ed0e5635f9d5 + file_checksum: 48323d41b9cb7ade08c8a87367846cccdd1f6409 original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index 22f91e8..f981faf 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -25,6 +25,15 @@ operations: 
resources: Table: fields: + Replicas: + custom_field: + list_of: CreateReplicationGroupMemberAction + compare: + is_ignored: true + ReplicasDescriptions: + custom_field: + list_of: ReplicaDescription + is_read_only: true GlobalSecondaryIndexesDescriptions: custom_field: list_of: GlobalSecondaryIndexDescription @@ -64,7 +73,7 @@ resources: references: service_name: kms resource: Key - path: Status.ACKResourceMetadata.ARN + path: Status.ACKResourceMetadata.ARN exceptions: errors: 404: diff --git a/apis/v1alpha1/table.go b/apis/v1alpha1/table.go index f2cc8b1..4f7bf00 100644 --- a/apis/v1alpha1/table.go +++ b/apis/v1alpha1/table.go @@ -141,7 +141,8 @@ type TableSpec struct { // For current minimum and maximum provisioned throughput values, see Service, // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. - ProvisionedThroughput *ProvisionedThroughput `json:"provisionedThroughput,omitempty"` + ProvisionedThroughput *ProvisionedThroughput `json:"provisionedThroughput,omitempty"` + Replicas []*CreateReplicationGroupMemberAction `json:"replicas,omitempty"` // Represents the settings used to enable server-side encryption. SSESpecification *SSESpecification `json:"sseSpecification,omitempty"` // The settings for DynamoDB Streams on the table. These settings consist of: @@ -220,9 +221,8 @@ type TableStatus struct { // * StreamLabel // +kubebuilder:validation:Optional LatestStreamLabel *string `json:"latestStreamLabel,omitempty"` - // Represents replicas of the table. // +kubebuilder:validation:Optional - Replicas []*ReplicaDescription `json:"replicas,omitempty"` + ReplicasDescriptions []*ReplicaDescription `json:"replicasDescriptions,omitempty"` // Contains details for the restore. 
// +kubebuilder:validation:Optional RestoreSummary *RestoreSummary `json:"restoreSummary,omitempty"` diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 0d584eb..a97bee2 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -2739,6 +2739,17 @@ func (in *TableSpec) DeepCopyInto(out *TableSpec) { *out = new(ProvisionedThroughput) (*in).DeepCopyInto(*out) } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = make([]*CreateReplicationGroupMemberAction, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CreateReplicationGroupMemberAction) + (*in).DeepCopyInto(*out) + } + } + } if in.SSESpecification != nil { in, out := &in.SSESpecification, &out.SSESpecification *out = new(SSESpecification) @@ -2846,8 +2857,8 @@ func (in *TableStatus) DeepCopyInto(out *TableStatus) { *out = new(string) **out = **in } - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas + if in.ReplicasDescriptions != nil { + in, out := &in.ReplicasDescriptions, &out.ReplicasDescriptions *out = make([]*ReplicaDescription, len(*in)) for i := range *in { if (*in)[i] != nil { diff --git a/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml b/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml index ee3e5a4..896b71a 100644 --- a/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml +++ b/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml @@ -322,6 +322,45 @@ spec: format: int64 type: integer type: object + replicas: + items: + description: Represents a replica to be created. + properties: + globalSecondaryIndexes: + items: + description: Represents the properties of a replica global + secondary index. + properties: + indexName: + type: string + provisionedThroughputOverride: + description: |- + Replica-specific provisioned throughput settings. 
If not specified, uses + the source table's provisioned throughput settings. + properties: + readCapacityUnits: + format: int64 + type: integer + type: object + type: object + type: array + kmsMasterKeyID: + type: string + provisionedThroughputOverride: + description: |- + Replica-specific provisioned throughput settings. If not specified, uses + the source table's provisioned throughput settings. + properties: + readCapacityUnits: + format: int64 + type: integer + type: object + regionName: + type: string + tableClassOverride: + type: string + type: object + type: array sseSpecification: description: Represents the settings used to enable server-side encryption. properties: @@ -605,8 +644,7 @@ spec: * StreamLabel type: string - replicas: - description: Represents replicas of the table. + replicasDescriptions: items: description: Contains the details of the replica. properties: diff --git a/generator.yaml b/generator.yaml index 22f91e8..f981faf 100644 --- a/generator.yaml +++ b/generator.yaml @@ -25,6 +25,15 @@ operations: resources: Table: fields: + Replicas: + custom_field: + list_of: CreateReplicationGroupMemberAction + compare: + is_ignored: true + ReplicasDescriptions: + custom_field: + list_of: ReplicaDescription + is_read_only: true GlobalSecondaryIndexesDescriptions: custom_field: list_of: GlobalSecondaryIndexDescription @@ -64,7 +73,7 @@ resources: references: service_name: kms resource: Key - path: Status.ACKResourceMetadata.ARN + path: Status.ACKResourceMetadata.ARN exceptions: errors: 404: diff --git a/helm/crds/dynamodb.services.k8s.aws_tables.yaml b/helm/crds/dynamodb.services.k8s.aws_tables.yaml index 36dfe26..f24770b 100644 --- a/helm/crds/dynamodb.services.k8s.aws_tables.yaml +++ b/helm/crds/dynamodb.services.k8s.aws_tables.yaml @@ -326,6 +326,45 @@ spec: format: int64 type: integer type: object + replicas: + items: + description: Represents a replica to be created. 
+ properties: + globalSecondaryIndexes: + items: + description: Represents the properties of a replica global + secondary index. + properties: + indexName: + type: string + provisionedThroughputOverride: + description: |- + Replica-specific provisioned throughput settings. If not specified, uses + the source table's provisioned throughput settings. + properties: + readCapacityUnits: + format: int64 + type: integer + type: object + type: object + type: array + kmsMasterKeyID: + type: string + provisionedThroughputOverride: + description: |- + Replica-specific provisioned throughput settings. If not specified, uses + the source table's provisioned throughput settings. + properties: + readCapacityUnits: + format: int64 + type: integer + type: object + regionName: + type: string + tableClassOverride: + type: string + type: object + type: array sseSpecification: description: Represents the settings used to enable server-side encryption. properties: @@ -609,8 +648,7 @@ spec: * StreamLabel type: string - replicas: - description: Represents replicas of the table. + replicasDescriptions: items: description: Contains the details of the replica. 
properties: diff --git a/pkg/resource/table/hooks.go b/pkg/resource/table/hooks.go index 22a2577..f37d1a4 100644 --- a/pkg/resource/table/hooks.go +++ b/pkg/resource/table/hooks.go @@ -34,21 +34,25 @@ import ( var ( ErrTableDeleting = fmt.Errorf( - "Table in '%v' state, cannot be modified or deleted", + "table in '%v' state, cannot be modified or deleted", svcsdktypes.TableStatusDeleting, ) ErrTableCreating = fmt.Errorf( - "Table in '%v' state, cannot be modified or deleted", + "table in '%v' state, cannot be modified or deleted", svcsdktypes.TableStatusCreating, ) ErrTableUpdating = fmt.Errorf( - "Table in '%v' state, cannot be modified or deleted", + "table in '%v' state, cannot be modified or deleted", svcsdktypes.TableStatusUpdating, ) ErrTableGSIsUpdating = fmt.Errorf( - "Table GSIs in '%v' state, cannot be modified or deleted", + "table GSIs in '%v' state, cannot be modified or deleted", svcsdktypes.IndexStatusCreating, ) + ErrReplicaNotActive = fmt.Errorf( + "table replica in NOT '%v' state, cannot be modified or deleted", + svcsdktypes.ReplicaStatusActive, + ) ) // TerminalStatuses are the status strings that are terminal states for a @@ -80,6 +84,10 @@ var ( ErrTableGSIsUpdating, 10*time.Second, ) + requeueWaitForReplicasActive = ackrequeue.NeededAfter( + ErrReplicaNotActive, + 10*time.Second, + ) ) // tableHasTerminalStatus returns whether the supplied Dynamodb table is in a @@ -224,6 +232,10 @@ func (rm *resourceManager) customUpdateTable( } return nil, err } + case delta.DifferentAt("Spec.Replicas"): + if err := rm.syncReplicaUpdates(ctx, latest, desired); err != nil { + return nil, err + } } } @@ -262,6 +274,10 @@ func (rm *resourceManager) newUpdateTablePayload( input := &svcsdk.UpdateTableInput{ TableName: aws.String(*r.ko.Spec.TableName), } + // If delta is nil, we're just creating a basic payload for other operations to modify + if delta == nil { + return input, nil + } if delta.DifferentAt("Spec.BillingMode") { if r.ko.Spec.BillingMode != nil { @@ 
-558,6 +574,15 @@ func customPreCompare( } } + // Handle ReplicaUpdates comparison + if len(a.ko.Spec.Replicas) != len(b.ko.Spec.Replicas) { + delta.Add("Spec.Replicas", a.ko.Spec.Replicas, b.ko.Spec.Replicas) + } else if a.ko.Spec.Replicas != nil && b.ko.Spec.Replicas != nil { + if !equalReplicaArrays(a.ko.Spec.Replicas, b.ko.Spec.Replicas) { + delta.Add("Spec.Replicas", a.ko.Spec.Replicas, b.ko.Spec.Replicas) + } + } + if a.ko.Spec.DeletionProtectionEnabled == nil { a.ko.Spec.DeletionProtectionEnabled = aws.Bool(false) } diff --git a/pkg/resource/table/hooks_replica_updates.go b/pkg/resource/table/hooks_replica_updates.go new file mode 100644 index 0000000..ec5995f --- /dev/null +++ b/pkg/resource/table/hooks_replica_updates.go @@ -0,0 +1,416 @@ +package table + +import ( + "context" + "errors" + + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws-controllers-k8s/dynamodb-controller/apis/v1alpha1" + svcsdk "github.com/aws/aws-sdk-go-v2/service/dynamodb" + svcsdktypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" +) + +// equalCreateReplicationGroupMemberActions compares two CreateReplicationGroupMemberAction objects +func equalCreateReplicationGroupMemberActions(a, b *v1alpha1.CreateReplicationGroupMemberAction) bool { + if !equalStrings(a.RegionName, b.RegionName) { + return false + } + if !equalStrings(a.KMSMasterKeyID, b.KMSMasterKeyID) { + return false + } + if !equalStrings(a.TableClassOverride, b.TableClassOverride) { + return false + } + if a.ProvisionedThroughputOverride != nil && b.ProvisionedThroughputOverride != nil { + if !equalInt64s(a.ProvisionedThroughputOverride.ReadCapacityUnits, b.ProvisionedThroughputOverride.ReadCapacityUnits) { + return false + } + } else if (a.ProvisionedThroughputOverride == nil) != (b.ProvisionedThroughputOverride == nil) { + return false + } + + return 
equalReplicaGlobalSecondaryIndexArrays(a.GlobalSecondaryIndexes, b.GlobalSecondaryIndexes) +} + +// equalReplicaGlobalSecondaryIndexes compares two ReplicaGlobalSecondaryIndex objects +func equalReplicaGlobalSecondaryIndexes( + a *v1alpha1.ReplicaGlobalSecondaryIndex, + b *v1alpha1.ReplicaGlobalSecondaryIndex, +) bool { + if !equalStrings(a.IndexName, b.IndexName) { + return false + } + + if a.ProvisionedThroughputOverride != nil && b.ProvisionedThroughputOverride != nil { + if !equalInt64s(a.ProvisionedThroughputOverride.ReadCapacityUnits, b.ProvisionedThroughputOverride.ReadCapacityUnits) { + return false + } + } else if (a.ProvisionedThroughputOverride == nil) != (b.ProvisionedThroughputOverride == nil) { + return false + } + + return true +} + +// equalReplicaGlobalSecondaryIndexArrays compares two arrays of ReplicaGlobalSecondaryIndex objects +func equalReplicaGlobalSecondaryIndexArrays( + a []*v1alpha1.ReplicaGlobalSecondaryIndex, + b []*v1alpha1.ReplicaGlobalSecondaryIndex, +) bool { + if len(a) != len(b) { + return false + } + + aGSIMap := make(map[string]*v1alpha1.ReplicaGlobalSecondaryIndex) + bGSIMap := make(map[string]*v1alpha1.ReplicaGlobalSecondaryIndex) + + for _, gsi := range a { + if gsi.IndexName != nil { + aGSIMap[*gsi.IndexName] = gsi + } + } + + for _, gsi := range b { + if gsi.IndexName != nil { + bGSIMap[*gsi.IndexName] = gsi + } + } + + for indexName, aGSI := range aGSIMap { + bGSI, exists := bGSIMap[indexName] + if !exists { + return false + } + + if !equalReplicaGlobalSecondaryIndexes(aGSI, bGSI) { + return false + } + } + + return true +} + +// equalReplicaArrays returns whether two CreateReplicationGroupMemberAction arrays are equal or not. 
+func equalReplicaArrays(a, b []*v1alpha1.CreateReplicationGroupMemberAction) bool { + if len(a) != len(b) { + return false + } + + aMap := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) + bMap := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) + + for _, replica := range a { + if replica.RegionName != nil { + aMap[*replica.RegionName] = replica + } + } + + for _, replica := range b { + if replica.RegionName != nil { + bMap[*replica.RegionName] = replica + } + } + + for regionName, aReplica := range aMap { + bReplica, exists := bMap[regionName] + if !exists { + return false + } + + if !equalCreateReplicationGroupMemberActions(aReplica, bReplica) { + return false + } + } + + for regionName := range bMap { + if _, exists := aMap[regionName]; !exists { + return false + } + } + + return true +} + +// createReplicaUpdate creates a ReplicationGroupUpdate for creating a new replica +func createReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) svcsdktypes.ReplicationGroupUpdate { + replicaUpdate := svcsdktypes.ReplicationGroupUpdate{} + createAction := &svcsdktypes.CreateReplicationGroupMemberAction{} + + if replica.RegionName != nil { + createAction.RegionName = aws.String(*replica.RegionName) + } + + if replica.KMSMasterKeyID != nil { + createAction.KMSMasterKeyId = aws.String(*replica.KMSMasterKeyID) + } + + if replica.TableClassOverride != nil { + createAction.TableClassOverride = svcsdktypes.TableClass(*replica.TableClassOverride) + } + + if replica.ProvisionedThroughputOverride != nil { + createAction.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{} + if replica.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + createAction.ProvisionedThroughputOverride.ReadCapacityUnits = replica.ProvisionedThroughputOverride.ReadCapacityUnits + } + } + + if replica.GlobalSecondaryIndexes != nil { + gsiList := []svcsdktypes.ReplicaGlobalSecondaryIndex{} + for _, gsi := range 
replica.GlobalSecondaryIndexes { + replicaGSI := svcsdktypes.ReplicaGlobalSecondaryIndex{} + if gsi.IndexName != nil { + replicaGSI.IndexName = aws.String(*gsi.IndexName) + } + if gsi.ProvisionedThroughputOverride != nil { + replicaGSI.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{} + if gsi.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + replicaGSI.ProvisionedThroughputOverride.ReadCapacityUnits = gsi.ProvisionedThroughputOverride.ReadCapacityUnits + } + } + gsiList = append(gsiList, replicaGSI) + } + createAction.GlobalSecondaryIndexes = gsiList + } + + replicaUpdate.Create = createAction + return replicaUpdate +} + +// updateReplicaUpdate creates a ReplicationGroupUpdate for updating an existing replica +func updateReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) svcsdktypes.ReplicationGroupUpdate { + replicaUpdate := svcsdktypes.ReplicationGroupUpdate{} + updateAction := &svcsdktypes.UpdateReplicationGroupMemberAction{} + + if replica.RegionName != nil { + updateAction.RegionName = aws.String(*replica.RegionName) + } + + if replica.KMSMasterKeyID != nil { + updateAction.KMSMasterKeyId = aws.String(*replica.KMSMasterKeyID) + } + + if replica.TableClassOverride != nil { + updateAction.TableClassOverride = svcsdktypes.TableClass(*replica.TableClassOverride) + } + + if replica.ProvisionedThroughputOverride != nil { + updateAction.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{} + if replica.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + updateAction.ProvisionedThroughputOverride.ReadCapacityUnits = replica.ProvisionedThroughputOverride.ReadCapacityUnits + } + } + + if replica.GlobalSecondaryIndexes != nil { + gsiList := []svcsdktypes.ReplicaGlobalSecondaryIndex{} + for _, gsi := range replica.GlobalSecondaryIndexes { + replicaGSI := svcsdktypes.ReplicaGlobalSecondaryIndex{} + if gsi.IndexName != nil { + replicaGSI.IndexName = aws.String(*gsi.IndexName) + } + if 
gsi.ProvisionedThroughputOverride != nil { + replicaGSI.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{} + if gsi.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + replicaGSI.ProvisionedThroughputOverride.ReadCapacityUnits = gsi.ProvisionedThroughputOverride.ReadCapacityUnits + } + } + gsiList = append(gsiList, replicaGSI) + } + updateAction.GlobalSecondaryIndexes = gsiList + } + + replicaUpdate.Update = updateAction + return replicaUpdate +} + +// deleteReplicaUpdate creates a ReplicationGroupUpdate for deleting an existing replica +func deleteReplicaUpdate(regionName string) svcsdktypes.ReplicationGroupUpdate { + return svcsdktypes.ReplicationGroupUpdate{ + Delete: &svcsdktypes.DeleteReplicationGroupMemberAction{ + RegionName: aws.String(regionName), + }, + } +} + +// canUpdateTableReplicas returns true if it's possible to update table replicas. +// We can only modify replicas when they are in ACTIVE state. +func canUpdateTableReplicas(r *resource) bool { + if isTableCreating(r) || isTableDeleting(r) || isTableUpdating(r) { + return false + } + + // Check if any replica is not in ACTIVE state + if r.ko.Status.ReplicasDescriptions != nil { + for _, replicaDesc := range r.ko.Status.ReplicasDescriptions { + if replicaDesc.RegionName != nil && replicaDesc.ReplicaStatus != nil { + if *replicaDesc.ReplicaStatus != string(svcsdktypes.ReplicaStatusActive) { + return false + } + } + } + } + return true +} + +// hasStreamSpecificationWithNewAndOldImages checks if the table has DynamoDB Streams enabled +// with the stream containing both the new and the old images of the item. 
+func hasStreamSpecificationWithNewAndOldImages(r *resource) bool { + StreamEnabled := r.ko.Spec.StreamSpecification != nil && + r.ko.Spec.StreamSpecification.StreamEnabled != nil && + *r.ko.Spec.StreamSpecification.StreamEnabled + StreamViewType := r.ko.Spec.StreamSpecification != nil && + r.ko.Spec.StreamSpecification.StreamViewType != nil && + *r.ko.Spec.StreamSpecification.StreamViewType == "NEW_AND_OLD_IMAGES" + return StreamEnabled && StreamViewType +} + +// syncReplicaUpdates updates the replica configuration for a table +func (rm *resourceManager) syncReplicaUpdates( + ctx context.Context, + latest *resource, + desired *resource, +) (err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.syncReplicaUpdates") + defer exit(err) + + if !hasStreamSpecificationWithNewAndOldImages(desired) { + msg := "table must have DynamoDB Streams enabled with StreamViewType set to NEW_AND_OLD_IMAGES for replica updates" + rlog.Debug(msg) + return ackerr.NewTerminalError(errors.New(msg)) + } + + if !canUpdateTableReplicas(latest) { + return requeueWaitForReplicasActive + } + + if isTableUpdating(latest) { + return requeueWaitWhileUpdating + } + + input, replicasInQueue, err := rm.newUpdateTableReplicaUpdatesOneAtATimePayload(ctx, latest, desired) + if err != nil { + return err + } + + // If there are no updates to make, we don't need to requeue + if len(input.ReplicaUpdates) == 0 { + return nil + } + + // Call the UpdateTable API + _, err = rm.sdkapi.UpdateTable(ctx, input) + rm.metrics.RecordAPICall("UPDATE", "UpdateTable", err) + if err != nil { + return err + } + + // If there are more replicas to process, requeue + if replicasInQueue > 0 { + rlog.Debug("more replica updates pending, will requeue", + "table", *latest.ko.Spec.TableName, + "remaining_updates", replicasInQueue) + return requeueWaitWhileUpdating + } + + return nil +} + +// newUpdateTableReplicaUpdatesOneAtATimePayload creates the UpdateTable input payload for replica updates, +// processing 
only one replica at a time +func (rm *resourceManager) newUpdateTableReplicaUpdatesOneAtATimePayload( + ctx context.Context, + latest *resource, + desired *resource, +) (input *svcsdk.UpdateTableInput, replicasInQueue int, err error) { + rlog := ackrtlog.FromContext(ctx) + exit := rlog.Trace("rm.newUpdateTableReplicaUpdatesOneAtATimePayload") + defer exit(err) + + createReplicas, updateReplicas, deleteRegions := calculateReplicaUpdates(latest, desired) + + input = &svcsdk.UpdateTableInput{ + TableName: aws.String(*desired.ko.Spec.TableName), + ReplicaUpdates: []svcsdktypes.ReplicationGroupUpdate{}, + } + + totalReplicasOperations := len(createReplicas) + len(updateReplicas) + len(deleteRegions) + replicasInQueue = totalReplicasOperations - 1 + + // Process replica updates in order: create, update, delete + // We'll only perform one replica action at a time + + if len(createReplicas) > 0 { + region := *createReplicas[0].RegionName + rlog.Debug("creating replica in region", "table", *desired.ko.Spec.TableName, "region", region) + input.ReplicaUpdates = append(input.ReplicaUpdates, createReplicaUpdate(createReplicas[0])) + return input, replicasInQueue, nil + } + + if len(updateReplicas) > 0 { + region := *updateReplicas[0].RegionName + rlog.Debug("updating replica in region", "table", *desired.ko.Spec.TableName, "region", region) + input.ReplicaUpdates = append(input.ReplicaUpdates, updateReplicaUpdate(updateReplicas[0])) + return input, replicasInQueue, nil + } + + if len(deleteRegions) > 0 { + region := deleteRegions[0] + rlog.Debug("deleting replica in region", "table", *desired.ko.Spec.TableName, "region", region) + input.ReplicaUpdates = append(input.ReplicaUpdates, deleteReplicaUpdate(deleteRegions[0])) + return input, replicasInQueue, nil + } + + return input, replicasInQueue, nil +} + +// calculateReplicaUpdates calculates the replica updates needed to reconcile the latest state with the desired state +// Returns three slices: replicas to create, replicas to 
update, and region names to delete +func calculateReplicaUpdates( + latest *resource, + desired *resource, +) ( + createReplicas []*v1alpha1.CreateReplicationGroupMemberAction, + updateReplicas []*v1alpha1.CreateReplicationGroupMemberAction, + deleteRegions []string, +) { + existingRegions := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) + if latest != nil && latest.ko.Spec.Replicas != nil { + for _, replica := range latest.ko.Spec.Replicas { + if replica.RegionName != nil { + existingRegions[*replica.RegionName] = replica + } + } + } + + desiredRegions := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) + if desired != nil && desired.ko.Spec.Replicas != nil { + for _, replica := range desired.ko.Spec.Replicas { + if replica.RegionName != nil { + desiredRegions[*replica.RegionName] = replica + } + } + } + + // Calculate replicas to create or update + for regionName, desiredReplica := range desiredRegions { + existingReplica, exists := existingRegions[regionName] + if !exists { + createReplicas = append(createReplicas, desiredReplica) + } else if !equalCreateReplicationGroupMemberActions(existingReplica, desiredReplica) { + updateReplicas = append(updateReplicas, desiredReplica) + } + } + + // Calculate regions to delete + for regionName := range existingRegions { + if _, exists := desiredRegions[regionName]; !exists { + deleteRegions = append(deleteRegions, regionName) + } + } + + return createReplicas, updateReplicas, deleteRegions +} diff --git a/pkg/resource/table/sdk.go b/pkg/resource/table/sdk.go index ce93f8a..6f8588c 100644 --- a/pkg/resource/table/sdk.go +++ b/pkg/resource/table/sdk.go @@ -264,13 +264,13 @@ func (rm *resourceManager) sdkFind( ko.Spec.ProvisionedThroughput = nil } if resp.Table.Replicas != nil { - f12 := []*svcapitypes.ReplicaDescription{} + f12 := []*svcapitypes.CreateReplicationGroupMemberAction{} for _, f12iter := range resp.Table.Replicas { - f12elem := &svcapitypes.ReplicaDescription{} + f12elem := 
&svcapitypes.CreateReplicationGroupMemberAction{} if f12iter.GlobalSecondaryIndexes != nil { - f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndexDescription{} + f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndex{} for _, f12elemf0iter := range f12iter.GlobalSecondaryIndexes { - f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndexDescription{} + f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndex{} if f12elemf0iter.IndexName != nil { f12elemf0elem.IndexName = f12elemf0iter.IndexName } @@ -298,33 +298,11 @@ func (rm *resourceManager) sdkFind( if f12iter.RegionName != nil { f12elem.RegionName = f12iter.RegionName } - if f12iter.ReplicaInaccessibleDateTime != nil { - f12elem.ReplicaInaccessibleDateTime = &metav1.Time{*f12iter.ReplicaInaccessibleDateTime} - } - if f12iter.ReplicaStatus != "" { - f12elem.ReplicaStatus = aws.String(string(f12iter.ReplicaStatus)) - } - if f12iter.ReplicaStatusDescription != nil { - f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription - } - if f12iter.ReplicaStatusPercentProgress != nil { - f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress - } - if f12iter.ReplicaTableClassSummary != nil { - f12elemf8 := &svcapitypes.TableClassSummary{} - if f12iter.ReplicaTableClassSummary.LastUpdateDateTime != nil { - f12elemf8.LastUpdateDateTime = &metav1.Time{*f12iter.ReplicaTableClassSummary.LastUpdateDateTime} - } - if f12iter.ReplicaTableClassSummary.TableClass != "" { - f12elemf8.TableClass = aws.String(string(f12iter.ReplicaTableClassSummary.TableClass)) - } - f12elem.ReplicaTableClassSummary = f12elemf8 - } f12 = append(f12, f12elem) } - ko.Status.Replicas = f12 + ko.Spec.Replicas = f12 } else { - ko.Status.Replicas = nil + ko.Spec.Replicas = nil } if resp.Table.RestoreSummary != nil { f13 := &svcapitypes.RestoreSummary{} @@ -440,6 +418,39 @@ func (rm *resourceManager) sdkFind( } else { ko.Spec.BillingMode = aws.String("PROVISIONED") } + if resp.Table.Replicas != nil { + f12 := 
[]*svcapitypes.ReplicaDescription{} + for _, f12iter := range resp.Table.Replicas { + f12elem := &svcapitypes.ReplicaDescription{} + if f12iter.RegionName != nil { + f12elem.RegionName = f12iter.RegionName + } + if f12iter.ReplicaStatus != "" { + f12elem.ReplicaStatus = aws.String(string(f12iter.ReplicaStatus)) + } else { + f12elem.ReplicaStatus = aws.String("Unknown") + } + if f12iter.ReplicaStatusDescription != nil { + f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription + } else { + f12elem.ReplicaStatusDescription = aws.String("") + } + if f12iter.ReplicaStatusPercentProgress != nil { + f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress + } else { + f12elem.ReplicaStatusPercentProgress = aws.String("0") + } + if f12iter.ReplicaInaccessibleDateTime != nil { + f12elem.ReplicaInaccessibleDateTime = &metav1.Time{Time: *f12iter.ReplicaInaccessibleDateTime} + } else { + f12elem.ReplicaInaccessibleDateTime = nil + } + f12 = append(f12, f12elem) + } + ko.Status.ReplicasDescriptions = f12 + } else { + ko.Status.ReplicasDescriptions = nil + } if isTableCreating(&resource{ko}) { return &resource{ko}, requeueWaitWhileCreating } @@ -681,13 +692,13 @@ func (rm *resourceManager) sdkCreate( ko.Spec.ProvisionedThroughput = nil } if resp.TableDescription.Replicas != nil { - f12 := []*svcapitypes.ReplicaDescription{} + f12 := []*svcapitypes.CreateReplicationGroupMemberAction{} for _, f12iter := range resp.TableDescription.Replicas { - f12elem := &svcapitypes.ReplicaDescription{} + f12elem := &svcapitypes.CreateReplicationGroupMemberAction{} if f12iter.GlobalSecondaryIndexes != nil { - f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndexDescription{} + f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndex{} for _, f12elemf0iter := range f12iter.GlobalSecondaryIndexes { - f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndexDescription{} + f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndex{} if f12elemf0iter.IndexName != nil { 
f12elemf0elem.IndexName = f12elemf0iter.IndexName } @@ -715,33 +726,11 @@ func (rm *resourceManager) sdkCreate( if f12iter.RegionName != nil { f12elem.RegionName = f12iter.RegionName } - if f12iter.ReplicaInaccessibleDateTime != nil { - f12elem.ReplicaInaccessibleDateTime = &metav1.Time{*f12iter.ReplicaInaccessibleDateTime} - } - if f12iter.ReplicaStatus != "" { - f12elem.ReplicaStatus = aws.String(string(f12iter.ReplicaStatus)) - } - if f12iter.ReplicaStatusDescription != nil { - f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription - } - if f12iter.ReplicaStatusPercentProgress != nil { - f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress - } - if f12iter.ReplicaTableClassSummary != nil { - f12elemf8 := &svcapitypes.TableClassSummary{} - if f12iter.ReplicaTableClassSummary.LastUpdateDateTime != nil { - f12elemf8.LastUpdateDateTime = &metav1.Time{*f12iter.ReplicaTableClassSummary.LastUpdateDateTime} - } - if f12iter.ReplicaTableClassSummary.TableClass != "" { - f12elemf8.TableClass = aws.String(string(f12iter.ReplicaTableClassSummary.TableClass)) - } - f12elem.ReplicaTableClassSummary = f12elemf8 - } f12 = append(f12, f12elem) } - ko.Status.Replicas = f12 + ko.Spec.Replicas = f12 } else { - ko.Status.Replicas = nil + ko.Spec.Replicas = nil } if resp.TableDescription.RestoreSummary != nil { f13 := &svcapitypes.RestoreSummary{} @@ -807,6 +796,15 @@ func (rm *resourceManager) sdkCreate( return nil, err } } + // Check if replicas were specified during creation + if desired.ko.Spec.Replicas != nil && len(desired.ko.Spec.Replicas) > 0 { + // Copy the replica configuration to the new resource + ko.Spec.Replicas = desired.ko.Spec.Replicas + + // Return with a requeue to process replica updates + // This will trigger the reconciliation loop again, which will call syncReplicaUpdates + return &resource{ko}, requeueWaitWhileUpdating + } return &resource{ko}, nil } @@ -1016,6 +1014,28 @@ func (rm *resourceManager) sdkDelete( if 
isTableUpdating(r) { return nil, requeueWaitWhileUpdating } + + // If there are replicas, we need to remove them before deleting the table + if r.ko.Spec.Replicas != nil && len(r.ko.Spec.Replicas) > 0 { + // Create a desired state with no replicas + desired := &resource{ + ko: r.ko.DeepCopy(), + } + desired.ko.Spec.Replicas = nil + + // Call syncReplicaUpdates to remove all replicas + err := rm.syncReplicaUpdates(ctx, r, desired) + if err != nil { + if err == requeueWaitWhileUpdating { + // This is expected - we need to wait for the replica removal to complete + return nil, err + } + return nil, err + } + } + + r.ko.Spec.Replicas = nil + input, err := rm.newDeleteRequestPayload(r) if err != nil { return nil, err diff --git a/templates/hooks/table/sdk_create_post_set_output.go.tpl b/templates/hooks/table/sdk_create_post_set_output.go.tpl index 49d67a2..824bfad 100644 --- a/templates/hooks/table/sdk_create_post_set_output.go.tpl +++ b/templates/hooks/table/sdk_create_post_set_output.go.tpl @@ -2,4 +2,13 @@ if err := rm.syncTTL(ctx, desired, &resource{ko}); err != nil { return nil, err } + } + // Check if replicas were specified during creation + if desired.ko.Spec.Replicas != nil && len(desired.ko.Spec.Replicas) > 0 { + // Copy the replica configuration to the new resource + ko.Spec.Replicas = desired.ko.Spec.Replicas + + // Return with a requeue to process replica updates + // This will trigger the reconciliation loop again, which will call syncReplicaUpdates + return &resource{ko}, requeueWaitWhileUpdating } \ No newline at end of file diff --git a/templates/hooks/table/sdk_delete_pre_build_request.go.tpl b/templates/hooks/table/sdk_delete_pre_build_request.go.tpl index fa720f3..544321c 100644 --- a/templates/hooks/table/sdk_delete_pre_build_request.go.tpl +++ b/templates/hooks/table/sdk_delete_pre_build_request.go.tpl @@ -3,4 +3,25 @@ } if isTableUpdating(r) { return nil, requeueWaitWhileUpdating - } \ No newline at end of file + } + + // If there are replicas, we 
need to remove them before deleting the table + if r.ko.Spec.Replicas != nil && len(r.ko.Spec.Replicas) > 0 { + // Create a desired state with no replicas + desired := &resource{ + ko: r.ko.DeepCopy(), + } + desired.ko.Spec.Replicas = nil + + // Call syncReplicaUpdates to remove all replicas + err := rm.syncReplicaUpdates(ctx, r, desired) + if err != nil { + if err == requeueWaitWhileUpdating { + // This is expected - we need to wait for the replica removal to complete + return nil, err + } + return nil, err + } + } + + r.ko.Spec.Replicas = nil diff --git a/templates/hooks/table/sdk_read_one_post_set_output.go.tpl b/templates/hooks/table/sdk_read_one_post_set_output.go.tpl index 4525c25..0047844 100644 --- a/templates/hooks/table/sdk_read_one_post_set_output.go.tpl +++ b/templates/hooks/table/sdk_read_one_post_set_output.go.tpl @@ -53,6 +53,45 @@ } else { ko.Spec.BillingMode = aws.String("PROVISIONED") } + if resp.Table.Replicas != nil { + f12 := []*svcapitypes.ReplicaDescription{} + for _, f12iter := range resp.Table.Replicas { + f12elem := &svcapitypes.ReplicaDescription{} + if f12iter.RegionName != nil { + f12elem.RegionName = f12iter.RegionName + } + if f12iter.ReplicaStatus != "" { + f12elem.ReplicaStatus = aws.String(string(f12iter.ReplicaStatus)) + } else { + f12elem.ReplicaStatus = aws.String("Unknown") + } + if f12iter.ReplicaStatusDescription != nil { + f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription + } else { + f12elem.ReplicaStatusDescription = aws.String("") + } + if f12iter.ReplicaStatusPercentProgress != nil { + f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress + } else { + f12elem.ReplicaStatusPercentProgress = aws.String("0") + } + if f12iter.ReplicaInaccessibleDateTime != nil { + f12elem.ReplicaInaccessibleDateTime = &metav1.Time{Time: *f12iter.ReplicaInaccessibleDateTime} + } else { + f12elem.ReplicaInaccessibleDateTime = nil + } + if f12iter.ReplicaTableClassSummary != nil && 
f12iter.ReplicaTableClassSummary.TableClass != "" { + f12elem.ReplicaTableClassSummary.TableClass = aws.String(string(f12iter.ReplicaTableClassSummary.TableClass)) + f12elem.ReplicaTableClassSummary.LastUpdateDateTime = &metav1.Time{Time: *f12iter.ReplicaTableClassSummary.LastUpdateDateTime} + } else { + f12elem.ReplicaTableClassSummary.TableClass = aws.String("STANDARD") + } + f12 = append(f12, f12elem) + } + ko.Status.ReplicasDescriptions = f12 + } else { + ko.Status.ReplicasDescriptions = nil + } if isTableCreating(&resource{ko}) { return &resource{ko}, requeueWaitWhileCreating } From 018583072b7f712a8c95c4aa22ac53bd079a5c50 Mon Sep 17 00:00:00 2001 From: Arush Sharma Date: Thu, 13 Mar 2025 15:56:49 -0700 Subject: [PATCH 2/5] add tests --- apis/v1alpha1/ack-generate-metadata.yaml | 10 +- apis/v1alpha1/generator.yaml | 9 +- apis/v1alpha1/table.go | 5 +- apis/v1alpha1/zz_generated.deepcopy.go | 8 +- .../dynamodb.services.k8s.aws_tables.yaml | 5 +- generator.yaml | 9 +- .../dynamodb.services.k8s.aws_tables.yaml | 5 +- pkg/resource/table/hooks.go | 40 +- pkg/resource/table/hooks_replica_updates.go | 68 ++- pkg/resource/table/manager.go | 14 +- pkg/resource/table/sdk.go | 142 +++-- pkg/resource/table/tags.go | 44 +- .../table/sdk_create_post_set_output.go.tpl | 5 +- .../table/sdk_delete_pre_build_request.go.tpl | 12 +- .../table/sdk_read_one_post_set_output.go.tpl | 60 +-- .../table_with_gsi_and_replicas.yaml | 45 ++ test/e2e/resources/table_with_replicas.yaml | 24 + .../table_with_replicas_invalid.yaml | 20 + test/e2e/table.py | 58 +++ test/e2e/tests/test_gsi_and_replicas.py | 405 +++++++++++++++ test/e2e/tests/test_table_replicas.py | 483 ++++++++++++++++++ 21 files changed, 1272 insertions(+), 199 deletions(-) create mode 100644 test/e2e/resources/table_with_gsi_and_replicas.yaml create mode 100644 test/e2e/resources/table_with_replicas.yaml create mode 100644 test/e2e/resources/table_with_replicas_invalid.yaml create mode 100644 
test/e2e/tests/test_gsi_and_replicas.py create mode 100644 test/e2e/tests/test_table_replicas.py diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index 72b9382..cd7b5bb 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2025-03-12T23:25:20Z" - build_hash: 5645e51ed3c413f616e1b929f1527a5139c44198 + build_date: "2025-03-21T05:42:15Z" + build_hash: 3722729cebe6d3c03c7e442655ef0846f91566a2 go_version: go1.24.0 - version: v0.43.2-3-g5645e51 -api_directory_checksum: 0d5b95bdbe63c6cfc495149b7a86440c4a5fb33a + version: v0.43.2-7-g3722729 +api_directory_checksum: a3ce13b1988591541d32ac56e55b416ff3b6bfc2 api_version: v1alpha1 aws_sdk_go_version: v1.32.6 generator_config_info: - file_checksum: 48323d41b9cb7ade08c8a87367846cccdd1f6409 + file_checksum: c8721c6d81ac66a3ea6c2b96b784c3da48b798b5 original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index f981faf..4531bd4 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -25,15 +25,11 @@ operations: resources: Table: fields: - Replicas: + ReplicationGroup: custom_field: list_of: CreateReplicationGroupMemberAction compare: is_ignored: true - ReplicasDescriptions: - custom_field: - list_of: ReplicaDescription - is_read_only: true GlobalSecondaryIndexesDescriptions: custom_field: list_of: GlobalSecondaryIndexDescription @@ -73,13 +69,14 @@ resources: references: service_name: kms resource: Key - path: Status.ACKResourceMetadata.ARN + path: Status.ACKResourceMetadata.ARN exceptions: errors: 404: code: ResourceNotFoundException terminal_codes: - InvalidParameter + - ValidationException update_operation: custom_method_name: customUpdateTable hooks: diff --git a/apis/v1alpha1/table.go b/apis/v1alpha1/table.go index 4f7bf00..e04f49c 100644 --- a/apis/v1alpha1/table.go 
+++ b/apis/v1alpha1/table.go @@ -142,7 +142,7 @@ type TableSpec struct { // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. ProvisionedThroughput *ProvisionedThroughput `json:"provisionedThroughput,omitempty"` - Replicas []*CreateReplicationGroupMemberAction `json:"replicas,omitempty"` + ReplicationGroup []*CreateReplicationGroupMemberAction `json:"replicationGroup,omitempty"` // Represents the settings used to enable server-side encryption. SSESpecification *SSESpecification `json:"sseSpecification,omitempty"` // The settings for DynamoDB Streams on the table. These settings consist of: @@ -221,8 +221,9 @@ type TableStatus struct { // * StreamLabel // +kubebuilder:validation:Optional LatestStreamLabel *string `json:"latestStreamLabel,omitempty"` + // Represents replicas of the table. // +kubebuilder:validation:Optional - ReplicasDescriptions []*ReplicaDescription `json:"replicasDescriptions,omitempty"` + Replicas []*ReplicaDescription `json:"replicas,omitempty"` // Contains details for the restore. 
// +kubebuilder:validation:Optional RestoreSummary *RestoreSummary `json:"restoreSummary,omitempty"` diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index a97bee2..27eecf7 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -2739,8 +2739,8 @@ func (in *TableSpec) DeepCopyInto(out *TableSpec) { *out = new(ProvisionedThroughput) (*in).DeepCopyInto(*out) } - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas + if in.ReplicationGroup != nil { + in, out := &in.ReplicationGroup, &out.ReplicationGroup *out = make([]*CreateReplicationGroupMemberAction, len(*in)) for i := range *in { if (*in)[i] != nil { @@ -2857,8 +2857,8 @@ func (in *TableStatus) DeepCopyInto(out *TableStatus) { *out = new(string) **out = **in } - if in.ReplicasDescriptions != nil { - in, out := &in.ReplicasDescriptions, &out.ReplicasDescriptions + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas *out = make([]*ReplicaDescription, len(*in)) for i := range *in { if (*in)[i] != nil { diff --git a/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml b/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml index 896b71a..a4e03bd 100644 --- a/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml +++ b/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml @@ -322,7 +322,7 @@ spec: format: int64 type: integer type: object - replicas: + replicationGroup: items: description: Represents a replica to be created. properties: @@ -644,7 +644,8 @@ spec: * StreamLabel type: string - replicasDescriptions: + replicas: + description: Represents replicas of the table. items: description: Contains the details of the replica. 
properties: diff --git a/generator.yaml b/generator.yaml index f981faf..4531bd4 100644 --- a/generator.yaml +++ b/generator.yaml @@ -25,15 +25,11 @@ operations: resources: Table: fields: - Replicas: + ReplicationGroup: custom_field: list_of: CreateReplicationGroupMemberAction compare: is_ignored: true - ReplicasDescriptions: - custom_field: - list_of: ReplicaDescription - is_read_only: true GlobalSecondaryIndexesDescriptions: custom_field: list_of: GlobalSecondaryIndexDescription @@ -73,13 +69,14 @@ resources: references: service_name: kms resource: Key - path: Status.ACKResourceMetadata.ARN + path: Status.ACKResourceMetadata.ARN exceptions: errors: 404: code: ResourceNotFoundException terminal_codes: - InvalidParameter + - ValidationException update_operation: custom_method_name: customUpdateTable hooks: diff --git a/helm/crds/dynamodb.services.k8s.aws_tables.yaml b/helm/crds/dynamodb.services.k8s.aws_tables.yaml index f24770b..a8a7fc6 100644 --- a/helm/crds/dynamodb.services.k8s.aws_tables.yaml +++ b/helm/crds/dynamodb.services.k8s.aws_tables.yaml @@ -326,7 +326,7 @@ spec: format: int64 type: integer type: object - replicas: + replicationGroup: items: description: Represents a replica to be created. properties: @@ -648,7 +648,8 @@ spec: * StreamLabel type: string - replicasDescriptions: + replicas: + description: Represents replicas of the table. items: description: Contains the details of the replica. 
properties: diff --git a/pkg/resource/table/hooks.go b/pkg/resource/table/hooks.go index f37d1a4..2df3aac 100644 --- a/pkg/resource/table/hooks.go +++ b/pkg/resource/table/hooks.go @@ -15,6 +15,7 @@ package table import ( "context" + "errors" "fmt" "strings" "time" @@ -49,9 +50,9 @@ var ( "table GSIs in '%v' state, cannot be modified or deleted", svcsdktypes.IndexStatusCreating, ) - ErrReplicaNotActive = fmt.Errorf( - "table replica in NOT '%v' state, cannot be modified or deleted", - svcsdktypes.ReplicaStatusActive, + ErrTableReplicasUpdating = fmt.Errorf( + "table replica in '%v' state, cannot be modified or deleted", + svcsdktypes.ReplicaStatusUpdating, ) ) @@ -78,14 +79,14 @@ var ( ) requeueWaitWhileUpdating = ackrequeue.NeededAfter( ErrTableUpdating, - 5*time.Second, + 10*time.Second, ) requeueWaitGSIReady = ackrequeue.NeededAfter( ErrTableGSIsUpdating, 10*time.Second, ) - requeueWaitForReplicasActive = ackrequeue.NeededAfter( - ErrReplicaNotActive, + requeueWaitReplicasActive = ackrequeue.NeededAfter( + ErrTableReplicasUpdating, 10*time.Second, ) ) @@ -232,7 +233,16 @@ func (rm *resourceManager) customUpdateTable( } return nil, err } - case delta.DifferentAt("Spec.Replicas"): + case delta.DifferentAt("Spec.ReplicationGroup"): + if !hasStreamSpecificationWithNewAndOldImages(desired) { + msg := "table must have DynamoDB Streams enabled with StreamViewType set to NEW_AND_OLD_IMAGES for replica updates" + rlog.Debug(msg) + return nil, ackerr.NewTerminalError(errors.New(msg)) + } + + if !canUpdateTableReplicas(latest) { + return nil, requeueWaitReplicasActive + } if err := rm.syncReplicaUpdates(ctx, latest, desired); err != nil { return nil, err } @@ -274,10 +284,6 @@ func (rm *resourceManager) newUpdateTablePayload( input := &svcsdk.UpdateTableInput{ TableName: aws.String(*r.ko.Spec.TableName), } - // If delta is nil, we're just creating a basic payload for other operations to modify - if delta == nil { - return input, nil - } if 
delta.DifferentAt("Spec.BillingMode") { if r.ko.Spec.BillingMode != nil { @@ -574,12 +580,12 @@ func customPreCompare( } } - // Handle ReplicaUpdates comparison - if len(a.ko.Spec.Replicas) != len(b.ko.Spec.Replicas) { - delta.Add("Spec.Replicas", a.ko.Spec.Replicas, b.ko.Spec.Replicas) - } else if a.ko.Spec.Replicas != nil && b.ko.Spec.Replicas != nil { - if !equalReplicaArrays(a.ko.Spec.Replicas, b.ko.Spec.Replicas) { - delta.Add("Spec.Replicas", a.ko.Spec.Replicas, b.ko.Spec.Replicas) + // Handle ReplicaUpdates API comparison + if len(a.ko.Spec.ReplicationGroup) != len(b.ko.Spec.ReplicationGroup) { + delta.Add("Spec.ReplicationGroup", a.ko.Spec.ReplicationGroup, b.ko.Spec.ReplicationGroup) + } else if a.ko.Spec.ReplicationGroup != nil && b.ko.Spec.ReplicationGroup != nil { + if !equalReplicaArrays(a.ko.Spec.ReplicationGroup, b.ko.Spec.ReplicationGroup) { + delta.Add("Spec.ReplicationGroup", a.ko.Spec.ReplicationGroup, b.ko.Spec.ReplicationGroup) } } diff --git a/pkg/resource/table/hooks_replica_updates.go b/pkg/resource/table/hooks_replica_updates.go index ec5995f..1357dc5 100644 --- a/pkg/resource/table/hooks_replica_updates.go +++ b/pkg/resource/table/hooks_replica_updates.go @@ -1,16 +1,27 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ package table import ( "context" - "errors" - ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws-controllers-k8s/dynamodb-controller/apis/v1alpha1" svcsdk "github.com/aws/aws-sdk-go-v2/service/dynamodb" svcsdktypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + + "github.com/aws-controllers-k8s/dynamodb-controller/apis/v1alpha1" ) // equalCreateReplicationGroupMemberActions compares two CreateReplicationGroupMemberAction objects @@ -238,17 +249,11 @@ func deleteReplicaUpdate(regionName string) svcsdktypes.ReplicationGroupUpdate { // canUpdateTableReplicas returns true if it's possible to update table replicas. // We can only modify replicas when they are in ACTIVE state. func canUpdateTableReplicas(r *resource) bool { - if isTableCreating(r) || isTableDeleting(r) || isTableUpdating(r) { - return false - } - // Check if any replica is not in ACTIVE state - if r.ko.Status.ReplicasDescriptions != nil { - for _, replicaDesc := range r.ko.Status.ReplicasDescriptions { - if replicaDesc.RegionName != nil && replicaDesc.ReplicaStatus != nil { - if *replicaDesc.ReplicaStatus != string(svcsdktypes.ReplicaStatusActive) { - return false - } + for _, replicaDesc := range r.ko.Status.Replicas { + if replicaDesc.RegionName != nil && replicaDesc.ReplicaStatus != nil { + if *replicaDesc.ReplicaStatus != string(svcsdktypes.ReplicaStatusActive) { + return false } } } @@ -275,32 +280,15 @@ func (rm *resourceManager) syncReplicaUpdates( ) (err error) { rlog := ackrtlog.FromContext(ctx) exit := rlog.Trace("rm.syncReplicaUpdates") - defer exit(err) - - if !hasStreamSpecificationWithNewAndOldImages(desired) { - msg := "table must have DynamoDB Streams enabled with StreamViewType set to NEW_AND_OLD_IMAGES for replica updates" - rlog.Debug(msg) - return ackerr.NewTerminalError(errors.New(msg)) - } - - if !canUpdateTableReplicas(latest) { - return 
requeueWaitForReplicasActive - } - - if isTableUpdating(latest) { - return requeueWaitWhileUpdating - } + defer func() { + exit(err) + }() input, replicasInQueue, err := rm.newUpdateTableReplicaUpdatesOneAtATimePayload(ctx, latest, desired) if err != nil { return err } - // If there are no updates to make, we don't need to requeue - if len(input.ReplicaUpdates) == 0 { - return nil - } - // Call the UpdateTable API _, err = rm.sdkapi.UpdateTable(ctx, input) rm.metrics.RecordAPICall("UPDATE", "UpdateTable", err) @@ -328,7 +316,9 @@ func (rm *resourceManager) newUpdateTableReplicaUpdatesOneAtATimePayload( ) (input *svcsdk.UpdateTableInput, replicasInQueue int, err error) { rlog := ackrtlog.FromContext(ctx) exit := rlog.Trace("rm.newUpdateTableReplicaUpdatesOneAtATimePayload") - defer exit(err) + defer func() { + exit(err) + }() createReplicas, updateReplicas, deleteRegions := calculateReplicaUpdates(latest, desired) @@ -378,8 +368,8 @@ func calculateReplicaUpdates( deleteRegions []string, ) { existingRegions := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) - if latest != nil && latest.ko.Spec.Replicas != nil { - for _, replica := range latest.ko.Spec.Replicas { + if latest != nil && latest.ko.Spec.ReplicationGroup != nil { + for _, replica := range latest.ko.Spec.ReplicationGroup { if replica.RegionName != nil { existingRegions[*replica.RegionName] = replica } @@ -387,8 +377,8 @@ func calculateReplicaUpdates( } desiredRegions := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) - if desired != nil && desired.ko.Spec.Replicas != nil { - for _, replica := range desired.ko.Spec.Replicas { + if desired != nil && desired.ko.Spec.ReplicationGroup != nil { + for _, replica := range desired.ko.Spec.ReplicationGroup { if replica.RegionName != nil { desiredRegions[*replica.RegionName] = replica } diff --git a/pkg/resource/table/manager.go b/pkg/resource/table/manager.go index dda8240..04a3b87 100644 --- a/pkg/resource/table/manager.go +++ 
b/pkg/resource/table/manager.go @@ -299,9 +299,9 @@ func (rm *resourceManager) EnsureTags( defaultTags := ackrt.GetDefaultTags(&rm.cfg, r.ko, md) var existingTags []*svcapitypes.Tag existingTags = r.ko.Spec.Tags - resourceTags := ToACKTags(existingTags) + resourceTags, keyOrder := convertToOrderedACKTags(existingTags) tags := acktags.Merge(resourceTags, defaultTags) - r.ko.Spec.Tags = FromACKTags(tags) + r.ko.Spec.Tags = fromACKTags(tags, keyOrder) return nil } @@ -318,9 +318,9 @@ func (rm *resourceManager) FilterSystemTags(res acktypes.AWSResource) { } var existingTags []*svcapitypes.Tag existingTags = r.ko.Spec.Tags - resourceTags := ToACKTags(existingTags) + resourceTags, tagKeyOrder := convertToOrderedACKTags(existingTags) ignoreSystemTags(resourceTags) - r.ko.Spec.Tags = FromACKTags(resourceTags) + r.ko.Spec.Tags = fromACKTags(resourceTags, tagKeyOrder) } // mirrorAWSTags ensures that AWS tags are included in the desired resource @@ -342,10 +342,10 @@ func mirrorAWSTags(a *resource, b *resource) { var existingDesiredTags []*svcapitypes.Tag existingDesiredTags = a.ko.Spec.Tags existingLatestTags = b.ko.Spec.Tags - desiredTags := ToACKTags(existingDesiredTags) - latestTags := ToACKTags(existingLatestTags) + desiredTags, desiredTagKeyOrder := convertToOrderedACKTags(existingDesiredTags) + latestTags, _ := convertToOrderedACKTags(existingLatestTags) syncAWSTags(desiredTags, latestTags) - a.ko.Spec.Tags = FromACKTags(desiredTags) + a.ko.Spec.Tags = fromACKTags(desiredTags, desiredTagKeyOrder) } // newResourceManager returns a new struct implementing diff --git a/pkg/resource/table/sdk.go b/pkg/resource/table/sdk.go index 6f8588c..9a9d639 100644 --- a/pkg/resource/table/sdk.go +++ b/pkg/resource/table/sdk.go @@ -264,13 +264,13 @@ func (rm *resourceManager) sdkFind( ko.Spec.ProvisionedThroughput = nil } if resp.Table.Replicas != nil { - f12 := []*svcapitypes.CreateReplicationGroupMemberAction{} + f12 := []*svcapitypes.ReplicaDescription{} for _, f12iter := range 
resp.Table.Replicas { - f12elem := &svcapitypes.CreateReplicationGroupMemberAction{} + f12elem := &svcapitypes.ReplicaDescription{} if f12iter.GlobalSecondaryIndexes != nil { - f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndex{} + f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndexDescription{} for _, f12elemf0iter := range f12iter.GlobalSecondaryIndexes { - f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndex{} + f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndexDescription{} if f12elemf0iter.IndexName != nil { f12elemf0elem.IndexName = f12elemf0iter.IndexName } @@ -298,11 +298,33 @@ func (rm *resourceManager) sdkFind( if f12iter.RegionName != nil { f12elem.RegionName = f12iter.RegionName } + if f12iter.ReplicaInaccessibleDateTime != nil { + f12elem.ReplicaInaccessibleDateTime = &metav1.Time{*f12iter.ReplicaInaccessibleDateTime} + } + if f12iter.ReplicaStatus != "" { + f12elem.ReplicaStatus = aws.String(string(f12iter.ReplicaStatus)) + } + if f12iter.ReplicaStatusDescription != nil { + f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription + } + if f12iter.ReplicaStatusPercentProgress != nil { + f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress + } + if f12iter.ReplicaTableClassSummary != nil { + f12elemf8 := &svcapitypes.TableClassSummary{} + if f12iter.ReplicaTableClassSummary.LastUpdateDateTime != nil { + f12elemf8.LastUpdateDateTime = &metav1.Time{*f12iter.ReplicaTableClassSummary.LastUpdateDateTime} + } + if f12iter.ReplicaTableClassSummary.TableClass != "" { + f12elemf8.TableClass = aws.String(string(f12iter.ReplicaTableClassSummary.TableClass)) + } + f12elem.ReplicaTableClassSummary = f12elemf8 + } f12 = append(f12, f12elem) } - ko.Spec.Replicas = f12 + ko.Status.Replicas = f12 } else { - ko.Spec.Replicas = nil + ko.Status.Replicas = nil } if resp.Table.RestoreSummary != nil { f13 := &svcapitypes.RestoreSummary{} @@ -419,37 +441,43 @@ func (rm *resourceManager) sdkFind( ko.Spec.BillingMode = 
aws.String("PROVISIONED") } if resp.Table.Replicas != nil { - f12 := []*svcapitypes.ReplicaDescription{} - for _, f12iter := range resp.Table.Replicas { - f12elem := &svcapitypes.ReplicaDescription{} - if f12iter.RegionName != nil { - f12elem.RegionName = f12iter.RegionName + replicationGroup := []*svcapitypes.CreateReplicationGroupMemberAction{} + for _, replica := range resp.Table.Replicas { + replicaElem := &svcapitypes.CreateReplicationGroupMemberAction{} + if replica.RegionName != nil { + replicaElem.RegionName = replica.RegionName } - if f12iter.ReplicaStatus != "" { - f12elem.ReplicaStatus = aws.String(string(f12iter.ReplicaStatus)) - } else { - f12elem.ReplicaStatus = aws.String("Unknown") + if replica.KMSMasterKeyId != nil { + replicaElem.KMSMasterKeyID = replica.KMSMasterKeyId } - if f12iter.ReplicaStatusDescription != nil { - f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription - } else { - f12elem.ReplicaStatusDescription = aws.String("") + if replica.ProvisionedThroughputOverride != nil { + replicaElem.ProvisionedThroughputOverride = &svcapitypes.ProvisionedThroughputOverride{ + ReadCapacityUnits: replica.ProvisionedThroughputOverride.ReadCapacityUnits, + } } - if f12iter.ReplicaStatusPercentProgress != nil { - f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress - } else { - f12elem.ReplicaStatusPercentProgress = aws.String("0") + if replica.GlobalSecondaryIndexes != nil { + gsiList := []*svcapitypes.ReplicaGlobalSecondaryIndex{} + for _, gsi := range replica.GlobalSecondaryIndexes { + gsiElem := &svcapitypes.ReplicaGlobalSecondaryIndex{ + IndexName: gsi.IndexName, + } + if gsi.ProvisionedThroughputOverride != nil { + gsiElem.ProvisionedThroughputOverride = &svcapitypes.ProvisionedThroughputOverride{ + ReadCapacityUnits: gsi.ProvisionedThroughputOverride.ReadCapacityUnits, + } + } + gsiList = append(gsiList, gsiElem) + } + replicaElem.GlobalSecondaryIndexes = gsiList } - if f12iter.ReplicaInaccessibleDateTime != 
nil { - f12elem.ReplicaInaccessibleDateTime = &metav1.Time{Time: *f12iter.ReplicaInaccessibleDateTime} - } else { - f12elem.ReplicaInaccessibleDateTime = nil + if replica.ReplicaTableClassSummary != nil && replica.ReplicaTableClassSummary.TableClass != "" { + replicaElem.TableClassOverride = aws.String(string(replica.ReplicaTableClassSummary.TableClass)) } - f12 = append(f12, f12elem) + replicationGroup = append(replicationGroup, replicaElem) } - ko.Status.ReplicasDescriptions = f12 + ko.Spec.ReplicationGroup = replicationGroup } else { - ko.Status.ReplicasDescriptions = nil + ko.Spec.ReplicationGroup = nil } if isTableCreating(&resource{ko}) { return &resource{ko}, requeueWaitWhileCreating @@ -692,13 +720,13 @@ func (rm *resourceManager) sdkCreate( ko.Spec.ProvisionedThroughput = nil } if resp.TableDescription.Replicas != nil { - f12 := []*svcapitypes.CreateReplicationGroupMemberAction{} + f12 := []*svcapitypes.ReplicaDescription{} for _, f12iter := range resp.TableDescription.Replicas { - f12elem := &svcapitypes.CreateReplicationGroupMemberAction{} + f12elem := &svcapitypes.ReplicaDescription{} if f12iter.GlobalSecondaryIndexes != nil { - f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndex{} + f12elemf0 := []*svcapitypes.ReplicaGlobalSecondaryIndexDescription{} for _, f12elemf0iter := range f12iter.GlobalSecondaryIndexes { - f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndex{} + f12elemf0elem := &svcapitypes.ReplicaGlobalSecondaryIndexDescription{} if f12elemf0iter.IndexName != nil { f12elemf0elem.IndexName = f12elemf0iter.IndexName } @@ -726,11 +754,33 @@ func (rm *resourceManager) sdkCreate( if f12iter.RegionName != nil { f12elem.RegionName = f12iter.RegionName } + if f12iter.ReplicaInaccessibleDateTime != nil { + f12elem.ReplicaInaccessibleDateTime = &metav1.Time{*f12iter.ReplicaInaccessibleDateTime} + } + if f12iter.ReplicaStatus != "" { + f12elem.ReplicaStatus = aws.String(string(f12iter.ReplicaStatus)) + } + if f12iter.ReplicaStatusDescription != 
nil { + f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription + } + if f12iter.ReplicaStatusPercentProgress != nil { + f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress + } + if f12iter.ReplicaTableClassSummary != nil { + f12elemf8 := &svcapitypes.TableClassSummary{} + if f12iter.ReplicaTableClassSummary.LastUpdateDateTime != nil { + f12elemf8.LastUpdateDateTime = &metav1.Time{*f12iter.ReplicaTableClassSummary.LastUpdateDateTime} + } + if f12iter.ReplicaTableClassSummary.TableClass != "" { + f12elemf8.TableClass = aws.String(string(f12iter.ReplicaTableClassSummary.TableClass)) + } + f12elem.ReplicaTableClassSummary = f12elemf8 + } f12 = append(f12, f12elem) } - ko.Spec.Replicas = f12 + ko.Status.Replicas = f12 } else { - ko.Spec.Replicas = nil + ko.Status.Replicas = nil } if resp.TableDescription.RestoreSummary != nil { f13 := &svcapitypes.RestoreSummary{} @@ -797,9 +847,8 @@ func (rm *resourceManager) sdkCreate( } } // Check if replicas were specified during creation - if desired.ko.Spec.Replicas != nil && len(desired.ko.Spec.Replicas) > 0 { - // Copy the replica configuration to the new resource - ko.Spec.Replicas = desired.ko.Spec.Replicas + if len(desired.ko.Spec.ReplicationGroup) > 0 { + ko.Spec.ReplicationGroup = desired.ko.Spec.ReplicationGroup // Return with a requeue to process replica updates // This will trigger the reconciliation loop again, which will call syncReplicaUpdates @@ -1016,26 +1065,18 @@ func (rm *resourceManager) sdkDelete( } // If there are replicas, we need to remove them before deleting the table - if r.ko.Spec.Replicas != nil && len(r.ko.Spec.Replicas) > 0 { - // Create a desired state with no replicas + if len(r.ko.Spec.ReplicationGroup) > 0 { desired := &resource{ ko: r.ko.DeepCopy(), } - desired.ko.Spec.Replicas = nil + desired.ko.Spec.ReplicationGroup = nil - // Call syncReplicaUpdates to remove all replicas err := rm.syncReplicaUpdates(ctx, r, desired) if err != nil { - if err == 
requeueWaitWhileUpdating { - // This is expected - we need to wait for the replica removal to complete - return nil, err - } return nil, err } } - r.ko.Spec.Replicas = nil - input, err := rm.newDeleteRequestPayload(r) if err != nil { return nil, err @@ -1169,7 +1210,8 @@ func (rm *resourceManager) terminalAWSError(err error) bool { return false } switch terminalErr.ErrorCode() { - case "InvalidParameter": + case "InvalidParameter", + "ValidationException": return true default: return false diff --git a/pkg/resource/table/tags.go b/pkg/resource/table/tags.go index fa49d2a..0b84ac7 100644 --- a/pkg/resource/table/tags.go +++ b/pkg/resource/table/tags.go @@ -30,39 +30,51 @@ var ( ACKSystemTags = []string{"services.k8s.aws/namespace", "services.k8s.aws/controller-version"} ) -// ToACKTags converts the tags parameter into 'acktags.Tags' shape. +// convertToOrderedACKTags converts the tags parameter into 'acktags.Tags' shape. // This method helps in creating the hub(acktags.Tags) for merging -// default controller tags with existing resource tags. -func ToACKTags(tags []*svcapitypes.Tag) acktags.Tags { +// default controller tags with existing resource tags. It also returns a slice +// of keys maintaining the original key Order when the tags are a list +func convertToOrderedACKTags(tags []*svcapitypes.Tag) (acktags.Tags, []string) { result := acktags.NewTags() - if tags == nil || len(tags) == 0 { - return result - } + keyOrder := []string{} + if len(tags) == 0 { + return result, keyOrder + } for _, t := range tags { if t.Key != nil { - if t.Value == nil { - result[*t.Key] = "" - } else { + keyOrder = append(keyOrder, *t.Key) + if t.Value != nil { result[*t.Key] = *t.Value + } else { + result[*t.Key] = "" } } } - return result + return result, keyOrder } -// FromACKTags converts the tags parameter into []*svcapitypes.Tag shape. +// fromACKTags converts the tags parameter into []*svcapitypes.Tag shape. 
// This method helps in setting the tags back inside AWSResource after merging -// default controller tags with existing resource tags. -func FromACKTags(tags acktags.Tags) []*svcapitypes.Tag { +// default controller tags with existing resource tags. When a list, +// it maintains the order from original +func fromACKTags(tags acktags.Tags, keyOrder []string) []*svcapitypes.Tag { result := []*svcapitypes.Tag{} + + for _, k := range keyOrder { + v, ok := tags[k] + if ok { + tag := svcapitypes.Tag{Key: &k, Value: &v} + result = append(result, &tag) + delete(tags, k) + } + } for k, v := range tags { - kCopy := k - vCopy := v - tag := svcapitypes.Tag{Key: &kCopy, Value: &vCopy} + tag := svcapitypes.Tag{Key: &k, Value: &v} result = append(result, &tag) } + return result } diff --git a/templates/hooks/table/sdk_create_post_set_output.go.tpl b/templates/hooks/table/sdk_create_post_set_output.go.tpl index 824bfad..44d80d4 100644 --- a/templates/hooks/table/sdk_create_post_set_output.go.tpl +++ b/templates/hooks/table/sdk_create_post_set_output.go.tpl @@ -4,9 +4,8 @@ } } // Check if replicas were specified during creation - if desired.ko.Spec.Replicas != nil && len(desired.ko.Spec.Replicas) > 0 { - // Copy the replica configuration to the new resource - ko.Spec.Replicas = desired.ko.Spec.Replicas + if len(desired.ko.Spec.ReplicationGroup) > 0 { + ko.Spec.ReplicationGroup = desired.ko.Spec.ReplicationGroup // Return with a requeue to process replica updates // This will trigger the reconciliation loop again, which will call syncReplicaUpdates diff --git a/templates/hooks/table/sdk_delete_pre_build_request.go.tpl b/templates/hooks/table/sdk_delete_pre_build_request.go.tpl index 544321c..06d976b 100644 --- a/templates/hooks/table/sdk_delete_pre_build_request.go.tpl +++ b/templates/hooks/table/sdk_delete_pre_build_request.go.tpl @@ -6,22 +6,14 @@ } // If there are replicas, we need to remove them before deleting the table - if r.ko.Spec.Replicas != nil && len(r.ko.Spec.Replicas) 
> 0 { - // Create a desired state with no replicas + if len(r.ko.Spec.ReplicationGroup) > 0 { desired := &resource{ ko: r.ko.DeepCopy(), } - desired.ko.Spec.Replicas = nil + desired.ko.Spec.ReplicationGroup = nil - // Call syncReplicaUpdates to remove all replicas err := rm.syncReplicaUpdates(ctx, r, desired) if err != nil { - if err == requeueWaitWhileUpdating { - // This is expected - we need to wait for the replica removal to complete - return nil, err - } return nil, err } } - - r.ko.Spec.Replicas = nil diff --git a/templates/hooks/table/sdk_read_one_post_set_output.go.tpl b/templates/hooks/table/sdk_read_one_post_set_output.go.tpl index 0047844..e629f72 100644 --- a/templates/hooks/table/sdk_read_one_post_set_output.go.tpl +++ b/templates/hooks/table/sdk_read_one_post_set_output.go.tpl @@ -54,43 +54,43 @@ ko.Spec.BillingMode = aws.String("PROVISIONED") } if resp.Table.Replicas != nil { - f12 := []*svcapitypes.ReplicaDescription{} - for _, f12iter := range resp.Table.Replicas { - f12elem := &svcapitypes.ReplicaDescription{} - if f12iter.RegionName != nil { - f12elem.RegionName = f12iter.RegionName + replicationGroup := []*svcapitypes.CreateReplicationGroupMemberAction{} + for _, replica := range resp.Table.Replicas { + replicaElem := &svcapitypes.CreateReplicationGroupMemberAction{} + if replica.RegionName != nil { + replicaElem.RegionName = replica.RegionName } - if f12iter.ReplicaStatus != "" { - f12elem.ReplicaStatus = aws.String(string(f12iter.ReplicaStatus)) - } else { - f12elem.ReplicaStatus = aws.String("Unknown") + if replica.KMSMasterKeyId != nil { + replicaElem.KMSMasterKeyID = replica.KMSMasterKeyId } - if f12iter.ReplicaStatusDescription != nil { - f12elem.ReplicaStatusDescription = f12iter.ReplicaStatusDescription - } else { - f12elem.ReplicaStatusDescription = aws.String("") + if replica.ProvisionedThroughputOverride != nil { + replicaElem.ProvisionedThroughputOverride = &svcapitypes.ProvisionedThroughputOverride{ + ReadCapacityUnits: 
replica.ProvisionedThroughputOverride.ReadCapacityUnits, + } } - if f12iter.ReplicaStatusPercentProgress != nil { - f12elem.ReplicaStatusPercentProgress = f12iter.ReplicaStatusPercentProgress - } else { - f12elem.ReplicaStatusPercentProgress = aws.String("0") + if replica.GlobalSecondaryIndexes != nil { + gsiList := []*svcapitypes.ReplicaGlobalSecondaryIndex{} + for _, gsi := range replica.GlobalSecondaryIndexes { + gsiElem := &svcapitypes.ReplicaGlobalSecondaryIndex{ + IndexName: gsi.IndexName, + } + if gsi.ProvisionedThroughputOverride != nil { + gsiElem.ProvisionedThroughputOverride = &svcapitypes.ProvisionedThroughputOverride{ + ReadCapacityUnits: gsi.ProvisionedThroughputOverride.ReadCapacityUnits, + } + } + gsiList = append(gsiList, gsiElem) + } + replicaElem.GlobalSecondaryIndexes = gsiList } - if f12iter.ReplicaInaccessibleDateTime != nil { - f12elem.ReplicaInaccessibleDateTime = &metav1.Time{Time: *f12iter.ReplicaInaccessibleDateTime} - } else { - f12elem.ReplicaInaccessibleDateTime = nil + if replica.ReplicaTableClassSummary != nil && replica.ReplicaTableClassSummary.TableClass != "" { + replicaElem.TableClassOverride = aws.String(string(replica.ReplicaTableClassSummary.TableClass)) } - if f12iter.ReplicaTableClassSummary != nil && f12iter.ReplicaTableClassSummary.TableClass != "" { - f12elem.ReplicaTableClassSummary.TableClass = aws.String(string(f12iter.ReplicaTableClassSummary.TableClass)) - f12elem.ReplicaTableClassSummary.LastUpdateDateTime = &metav1.Time{Time: *f12iter.ReplicaTableClassSummary.LastUpdateDateTime} - } else { - f12elem.ReplicaTableClassSummary.TableClass = aws.String("STANDARD") - } - f12 = append(f12, f12elem) + replicationGroup = append(replicationGroup, replicaElem) } - ko.Status.ReplicasDescriptions = f12 + ko.Spec.ReplicationGroup = replicationGroup } else { - ko.Status.ReplicasDescriptions = nil + ko.Spec.ReplicationGroup = nil } if isTableCreating(&resource{ko}) { return &resource{ko}, requeueWaitWhileCreating diff --git 
a/test/e2e/resources/table_with_gsi_and_replicas.yaml b/test/e2e/resources/table_with_gsi_and_replicas.yaml new file mode 100644 index 0000000..2ee6769 --- /dev/null +++ b/test/e2e/resources/table_with_gsi_and_replicas.yaml @@ -0,0 +1,45 @@ +apiVersion: dynamodb.services.k8s.aws/v1alpha1 +kind: Table +metadata: + name: $TABLE_NAME +spec: + tableName: $TABLE_NAME + tableClass: STANDARD + attributeDefinitions: + - attributeName: PK + attributeType: S + - attributeName: SK + attributeType: S + - attributeName: GSI1PK + attributeType: S + - attributeName: GSI1SK + attributeType: S + - attributeName: GSI2PK + attributeType: S + keySchema: + - attributeName: PK + keyType: HASH + - attributeName: SK + keyType: RANGE + billingMode: PAY_PER_REQUEST + streamSpecification: + streamEnabled: true + streamViewType: "NEW_AND_OLD_IMAGES" + replicationGroup: + - regionName: $REPLICA_REGION_1 + - regionName: $REPLICA_REGION_2 + globalSecondaryIndexes: + - indexName: GSI1 + keySchema: + - attributeName: GSI1PK + keyType: HASH + - attributeName: GSI1SK + keyType: RANGE + projection: + projectionType: ALL + - indexName: GSI2 + keySchema: + - attributeName: GSI2PK + keyType: HASH + projection: + projectionType: KEYS_ONLY \ No newline at end of file diff --git a/test/e2e/resources/table_with_replicas.yaml b/test/e2e/resources/table_with_replicas.yaml new file mode 100644 index 0000000..08da660 --- /dev/null +++ b/test/e2e/resources/table_with_replicas.yaml @@ -0,0 +1,24 @@ +apiVersion: dynamodb.services.k8s.aws/v1alpha1 +kind: Table +metadata: + name: $TABLE_NAME +spec: + tableName: $TABLE_NAME + tableClass: STANDARD + attributeDefinitions: + - attributeName: PK + attributeType: S + - attributeName: SK + attributeType: S + keySchema: + - attributeName: PK + keyType: HASH + - attributeName: SK + keyType: RANGE + billingMode: PAY_PER_REQUEST + streamSpecification: + streamEnabled: true + streamViewType: "NEW_AND_OLD_IMAGES" + replicationGroup: + - regionName: $REPLICA_REGION_1 + - 
regionName: $REPLICA_REGION_2 diff --git a/test/e2e/resources/table_with_replicas_invalid.yaml b/test/e2e/resources/table_with_replicas_invalid.yaml new file mode 100644 index 0000000..a75eff7 --- /dev/null +++ b/test/e2e/resources/table_with_replicas_invalid.yaml @@ -0,0 +1,20 @@ +apiVersion: dynamodb.services.k8s.aws/v1alpha1 +kind: Table +metadata: + name: $TABLE_NAME +spec: + tableName: $TABLE_NAME + tableClass: STANDARD + attributeDefinitions: + - attributeName: PK + attributeType: S + - attributeName: SK + attributeType: S + keySchema: + - attributeName: PK + keyType: HASH + - attributeName: SK + keyType: RANGE + billingMode: PAY_PER_REQUEST + replicationGroup: + - regionName: $REPLICA_REGION_1 diff --git a/test/e2e/table.py b/test/e2e/table.py index 5deb005..fff5b00 100644 --- a/test/e2e/table.py +++ b/test/e2e/table.py @@ -260,3 +260,61 @@ def get_point_in_time_recovery_enabled(table_name): return resp['ContinuousBackupsDescription']['PointInTimeRecoveryDescription']['PointInTimeRecoveryStatus'] == 'ENABLED' except c.exceptions.ResourceNotFoundException: return None + + +class ReplicaMatcher: + def __init__(self, expected_regions): + self.expected_regions = expected_regions + + def __call__(self, record: dict) -> bool: + if 'Replicas' not in record: + return False + + actual_regions = set() + for replica in record['Replicas']: + if 'RegionName' in replica: + actual_regions.add(replica['RegionName']) + + return set(self.expected_regions) == actual_regions + + +def replicas_match(expected_regions) -> TableMatchFunc: + return ReplicaMatcher(expected_regions) + + +class ReplicaStatusMatcher: + def __init__(self, region, status): + self.region = region + self.status = status + + def __call__(self, record: dict) -> bool: + if 'Replicas' not in record: + return False + + for replica in record['Replicas']: + if 'RegionName' in replica and replica['RegionName'] == self.region: + return 'ReplicaStatus' in replica and replica['ReplicaStatus'] == self.status + + return 
False + + +def replica_status_matches(region, status) -> TableMatchFunc: + return ReplicaStatusMatcher(region, status) + + +def get_replicas(table_name): + """Returns the replicas for a DynamoDB table + + Args: + table_name: the name of the DynamoDB table to get replicas for + Returns: + A list of replicas or None if the table doesn't exist + """ + dynamodb = boto3.client('dynamodb') + try: + response = dynamodb.describe_table(TableName=table_name) + if 'Table' in response and 'Replicas' in response['Table']: + return response['Table']['Replicas'] + return [] + except dynamodb.exceptions.ResourceNotFoundException: + return None diff --git a/test/e2e/tests/test_gsi_and_replicas.py b/test/e2e/tests/test_gsi_and_replicas.py new file mode 100644 index 0000000..f85ae42 --- /dev/null +++ b/test/e2e/tests/test_gsi_and_replicas.py @@ -0,0 +1,405 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. + +"""Integration tests for DynamoDB Tables with both GSIs and Replicas. 
+""" + +import logging +import time +from typing import Dict, Tuple + +import boto3 +import pytest +from acktest import tags +from acktest.k8s import resource as k8s +from acktest.resources import random_suffix_name +from e2e import (CRD_GROUP, CRD_VERSION, condition, get_resource_tags, + load_dynamodb_resource, service_marker, table, + wait_for_cr_status) +from e2e.replacement_values import REPLACEMENT_VALUES + +RESOURCE_PLURAL = "tables" + +DELETE_WAIT_AFTER_SECONDS = 15 +MODIFY_WAIT_AFTER_SECONDS = 90 +REPLICA_WAIT_AFTER_SECONDS = 300 # Replicas can take longer to create/update + +REPLICA_REGION_1 = "us-east-1" +REPLICA_REGION_2 = "eu-west-1" +REPLICA_REGION_3 = "eu-central-1" + + +def create_table_with_gsi_and_replicas(name: str): + replacements = REPLACEMENT_VALUES.copy() + replacements["TABLE_NAME"] = name + replacements["REPLICA_REGION_1"] = REPLICA_REGION_1 + replacements["REPLICA_REGION_2"] = REPLICA_REGION_2 + + resource_data = load_dynamodb_resource( + "table_with_gsi_and_replicas", + additional_replacements=replacements, + ) + logging.debug(resource_data) + + # Create the k8s resource + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + name, namespace="default", + ) + k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + assert k8s.get_resource_exists(ref) + + return (ref, cr) + + +@pytest.fixture(scope="function") +def table_with_gsi_and_replicas(): + table_name = random_suffix_name("table-gsi-replicas", 32) + + (ref, res) = create_table_with_gsi_and_replicas(table_name) + + yield (ref, res) + + # Delete the k8s resource if it still exists + if k8s.get_resource_exists(ref): + k8s.delete_custom_resource(ref) + time.sleep(DELETE_WAIT_AFTER_SECONDS) + + +@service_marker +@pytest.mark.canary +class TestTableGSIAndReplicas: + def table_exists(self, table_name: str) -> bool: + return table.get(table_name) is not None + + def 
test_gsi_and_replicas_lifecycle(self, table_with_gsi_and_replicas): + """Test full lifecycle of a table with both GSIs and replicas, including valid and invalid update scenarios: + 1. Create table with 2 GSIs and 2 replicas + 2. Verify GSIs and replicas are created correctly + 3. Test valid update: Update GSIs and replicas simultaneously + 4. Test invalid update: Try to add GSI only to parent but not replicas + 5. Remove all GSIs and replicas + 6. Delete table + """ + (ref, res) = table_with_gsi_and_replicas + table_name = res["spec"]["tableName"] + + # Check DynamoDB Table exists + assert self.table_exists(table_name) + + # Wait for the table to be active + table.wait_until( + table_name, + table.status_matches("ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=15, + ) + + # Step 1: Verify initial GSIs were created + table.wait_until( + table_name, + table.gsi_matches([ + { + "indexName": "GSI1", + "keySchema": [ + {"attributeName": "GSI1PK", "keyType": "HASH"}, + {"attributeName": "GSI1SK", "keyType": "RANGE"} + ], + "projection": { + "projectionType": "ALL" + } + }, + { + "indexName": "GSI2", + "keySchema": [ + {"attributeName": "GSI2PK", "keyType": "HASH"} + ], + "projection": { + "projectionType": "KEYS_ONLY" + } + } + ]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=15, + ) + + # Wait for both initial replicas to be created + table.wait_until( + table_name, + table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=15, + ) + + # Verify both GSIs and replicas are in correct state + logging.info("Verifying initial GSIs and replicas...") + table_info = table.get(table_name) + assert "GlobalSecondaryIndexes" in table_info + assert len(table_info["GlobalSecondaryIndexes"]) == 2 + + replicas = table.get_replicas(table_name) + assert replicas is not None + assert len(replicas) == 2 + region_names = [r["RegionName"] for r in replicas] + assert 
REPLICA_REGION_1 in region_names + assert REPLICA_REGION_2 in region_names + + # Step 2: Valid update - Update GSIs and replicas simultaneously + logging.info( + "Performing valid update: Updating GSIs and replicas simultaneously...") + cr = k8s.wait_resource_consumed_by_controller(ref) + + # Update attribute definitions to include a new GSI3 key + cr["spec"]["attributeDefinitions"] = [ + {"attributeName": "PK", "attributeType": "S"}, + {"attributeName": "SK", "attributeType": "S"}, + {"attributeName": "GSI1PK", "attributeType": "S"}, + {"attributeName": "GSI1SK", "attributeType": "S"}, + {"attributeName": "GSI3PK", "attributeType": "S"}, + ] + + # Remove GSI2, keep GSI1, add GSI3 + cr["spec"]["globalSecondaryIndexes"] = [ + { + "indexName": "GSI1", + "keySchema": [ + {"attributeName": "GSI1PK", "keyType": "HASH"}, + {"attributeName": "GSI1SK", "keyType": "RANGE"} + ], + "projection": { + "projectionType": "ALL" + } + }, + { + "indexName": "GSI3", + "keySchema": [ + {"attributeName": "GSI3PK", "keyType": "HASH"} + ], + "projection": { + "projectionType": "ALL" + } + } + ] + + # Remove REPLICA_REGION_2, keep REPLICA_REGION_1, add REPLICA_REGION_3 + cr["spec"]["replicationGroup"] = [ + {"regionName": REPLICA_REGION_1}, + {"regionName": REPLICA_REGION_3} + ] + + # Patch k8s resource with all changes at once + k8s.patch_custom_resource(ref, cr) + + # Wait for GSIs to be updated + table.wait_until( + table_name, + table.gsi_matches([ + { + "indexName": "GSI1", + "keySchema": [ + {"attributeName": "GSI1PK", "keyType": "HASH"}, + {"attributeName": "GSI1SK", "keyType": "RANGE"} + ], + "projection": { + "projectionType": "ALL" + } + }, + { + "indexName": "GSI3", + "keySchema": [ + {"attributeName": "GSI3PK", "keyType": "HASH"} + ], + "projection": { + "projectionType": "ALL" + } + } + ]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*2, + interval_seconds=15, + ) + + # Wait for replicas to be updated + table.wait_until( + table_name, + table.replicas_match([REPLICA_REGION_1, 
REPLICA_REGION_3]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*2, + interval_seconds=15, + ) + + # Verify updates were applied correctly + table_info = table.get(table_name) + gsi_names = [gsi["IndexName"] + for gsi in table_info["GlobalSecondaryIndexes"]] + assert "GSI1" in gsi_names + assert "GSI3" in gsi_names + assert "GSI2" not in gsi_names + + replicas = table.get_replicas(table_name) + region_names = [r["RegionName"] for r in replicas] + assert REPLICA_REGION_1 in region_names + assert REPLICA_REGION_3 in region_names + assert REPLICA_REGION_2 not in region_names + + # Step 3: Test invalid update - try to add GSI only to parent table + logging.info( + "Testing invalid update: Adding GSI only to parent table but not replicas...") + cr = k8s.wait_resource_consumed_by_controller(ref) + + # Update attribute definitions to include a GSI that would only apply to parent + cr["spec"]["attributeDefinitions"] = [ + {"attributeName": "PK", "attributeType": "S"}, + {"attributeName": "SK", "attributeType": "S"}, + {"attributeName": "GSI1PK", "attributeType": "S"}, + {"attributeName": "GSI1SK", "attributeType": "S"}, + {"attributeName": "GSI3PK", "attributeType": "S"}, + {"attributeName": "GSI4PK", "attributeType": "S"}, + ] + + # Add a new GSI4 and attempt to define it for parent table but not replicas + cr["spec"]["globalSecondaryIndexes"] = [ + { + "indexName": "GSI1", + "keySchema": [ + {"attributeName": "GSI1PK", "keyType": "HASH"}, + {"attributeName": "GSI1SK", "keyType": "RANGE"} + ], + "projection": { + "projectionType": "ALL" + } + }, + { + "indexName": "GSI3", + "keySchema": [ + {"attributeName": "GSI3PK", "keyType": "HASH"} + ], + "projection": { + "projectionType": "ALL" + } + }, + { + "indexName": "GSI4", + "keySchema": [ + {"attributeName": "GSI4PK", "keyType": "HASH"} + ], + "projection": { + "projectionType": "ALL" + }, + "replicaDefinitions": [ + { + "regionName": REPLICA_REGION_1, + "propagateToReplica": False # Not including this in replica + } + ] + 
} + ] + + # Patch k8s resource with invalid configuration + k8s.patch_custom_resource(ref, cr) + + # Wait for terminal condition to be set or timeout + max_wait_seconds = 60 + interval_seconds = 5 + start_time = time.time() + terminal_condition_set = False + error_message_contains = "GSI must be created on all replicas" + + while time.time() - start_time < max_wait_seconds: + cr = k8s.get_resource(ref) + if cr and "status" in cr and "conditions" in cr["status"]: + for condition_obj in cr["status"]["conditions"]: + if condition_obj["type"] == "ACK.Terminal" and condition_obj["status"] == "True": + if error_message_contains in condition_obj["message"]: + terminal_condition_set = True + logging.info( + f"Found terminal condition: {condition_obj['message']}") + break + + if terminal_condition_set: + break + time.sleep(interval_seconds) + + # Assert that we received a terminal condition for the invalid update + assert terminal_condition_set, "Terminal condition was not set for invalid GSI configuration" + + # Get the latest resource state after invalid update failure + cr = k8s.wait_resource_consumed_by_controller(ref) + + # Step 4: Remove all GSIs and replicas except the minimum required + logging.info("Removing all GSIs and all but one replica...") + + # Update attribute definitions - keep only required for table + cr["spec"]["attributeDefinitions"] = [ + {"attributeName": "PK", "attributeType": "S"}, + {"attributeName": "SK", "attributeType": "S"}, + ] + + # Remove all GSIs + if "globalSecondaryIndexes" in cr["spec"]: + cr["spec"].pop("globalSecondaryIndexes") + + # Keep only the primary region replica + cr["spec"]["replicationGroup"] = [ + {"regionName": REPLICA_REGION_1} + ] + + # Patch k8s resource with all changes at once + k8s.patch_custom_resource(ref, cr) + + # Wait for all GSIs to be deleted + max_wait_seconds = REPLICA_WAIT_AFTER_SECONDS*2 + interval_seconds = 15 + start_time = time.time() + + while time.time() - start_time < max_wait_seconds: + table_info = 
table.get(table_name) + if "GlobalSecondaryIndexes" not in table_info: + logging.info("All GSIs have been deleted") + break + logging.info( + f"Waiting for GSIs to be deleted, remaining: {len(table_info.get('GlobalSecondaryIndexes', []))}") + time.sleep(interval_seconds) + + # Verify GSIs are deleted + table_info = table.get(table_name) + assert "GlobalSecondaryIndexes" not in table_info, "All GSIs should be deleted" + + # Wait for replicas to be updated + table.wait_until( + table_name, + table.replicas_match([REPLICA_REGION_1]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*2, + interval_seconds=15, + ) + + # Step 5: Delete the table + logging.info("Deleting the table...") + k8s.delete_custom_resource(ref) + + # Wait for the table to be deleted + max_wait_seconds = REPLICA_WAIT_AFTER_SECONDS*2 + interval_seconds = 15 + start_time = time.time() + + while time.time() - start_time < max_wait_seconds: + if not self.table_exists(table_name): + break + logging.info("Waiting for table to be deleted...") + time.sleep(interval_seconds) + + # Verify table and all resources are deleted + assert not self.table_exists(table_name) + replicas = table.get_replicas(table_name) + assert replicas is None diff --git a/test/e2e/tests/test_table_replicas.py b/test/e2e/tests/test_table_replicas.py new file mode 100644 index 0000000..10b091e --- /dev/null +++ b/test/e2e/tests/test_table_replicas.py @@ -0,0 +1,483 @@ +# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may +# not use this file except in compliance with the License. A copy of the +# License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. 
+ +"""Integration tests for the DynamoDB Table Replicas. +""" + +import logging +import time +from typing import Dict, Tuple + +import boto3 +import pytest +from acktest import tags +from acktest.k8s import resource as k8s +from acktest.resources import random_suffix_name +from e2e import (CRD_GROUP, CRD_VERSION, condition, get_resource_tags, + load_dynamodb_resource, service_marker, table, + wait_for_cr_status) +from e2e.replacement_values import REPLACEMENT_VALUES + +RESOURCE_PLURAL = "tables" + +DELETE_WAIT_AFTER_SECONDS = 30 +MODIFY_WAIT_AFTER_SECONDS = 180 +REPLICA_WAIT_AFTER_SECONDS = 600 + +REPLICA_REGION_1 = "us-east-1" +REPLICA_REGION_2 = "eu-west-1" +REPLICA_REGION_3 = "eu-central-1" +REPLICA_REGION_4 = "ap-southeast-1" +REPLICA_REGION_5 = "eu-north-1" + + +def create_table_with_replicas(name: str, resource_template, regions=None): + if regions is None: + regions = [REPLICA_REGION_1, REPLICA_REGION_2] + + replacements = REPLACEMENT_VALUES.copy() + replacements["TABLE_NAME"] = name + replacements["REPLICA_REGION_1"] = regions[0] + replacements["REPLICA_REGION_2"] = regions[1] + + resource_data = load_dynamodb_resource( + resource_template, + additional_replacements=replacements, + ) + logging.debug(resource_data) + + # Create the k8s resource + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + name, namespace="default", + ) + k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + assert k8s.get_resource_exists(ref) + + return (ref, cr) + + +@pytest.fixture(scope="module") +def table_with_replicas(): + table_name = random_suffix_name("table-replicas", 32) + + (ref, res) = create_table_with_replicas( + table_name, + "table_with_replicas", + [REPLICA_REGION_1, REPLICA_REGION_2] + ) + + yield (ref, res) + + # Delete the k8s resource if it still exists + if k8s.get_resource_exists(ref): + k8s.delete_custom_resource(ref) + time.sleep(DELETE_WAIT_AFTER_SECONDS) 
+ + +def create_table_with_invalid_replicas(name: str): + replacements = REPLACEMENT_VALUES.copy() + replacements["TABLE_NAME"] = name + replacements["REPLICA_REGION_1"] = REPLICA_REGION_1 + + resource_data = load_dynamodb_resource( + "table_with_replicas_invalid", + additional_replacements=replacements, + ) + logging.debug(resource_data) + + # Create the k8s resource + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + name, namespace="default", + ) + k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + assert cr is not None + assert k8s.get_resource_exists(ref) + + return (ref, cr) + + +@pytest.fixture(scope="function") +def table_with_invalid_replicas(): + table_name = random_suffix_name("table-invalid-replicas", 32) + + (ref, res) = create_table_with_invalid_replicas(table_name) + + yield (ref, res) + + # Delete the k8s resource if it still exists + if k8s.get_resource_exists(ref): + k8s.delete_custom_resource(ref) + time.sleep(DELETE_WAIT_AFTER_SECONDS) + + +@service_marker +@pytest.mark.canary +class TestTableReplicas: + def table_exists(self, table_name: str) -> bool: + return table.get(table_name) is not None + + def test_create_table_with_replicas(self, table_with_replicas): + (ref, res) = table_with_replicas + + table_name = res["spec"]["tableName"] + + # Check DynamoDB Table exists + assert self.table_exists(table_name) + + # Wait for the table to be active + table.wait_until( + table_name, + table.status_matches("ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Wait for both initial replicas to be created + table.wait_until( + table_name, + table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Wait for both replicas to be active + for region in [REPLICA_REGION_1, REPLICA_REGION_2]: + table.wait_until( + table_name, + 
table.replica_status_matches(region, "ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Verify the replica exists + replicas = table.get_replicas(table_name) + assert replicas is not None + assert len(replicas) == 2 + region_names = [r["RegionName"] for r in replicas] + assert REPLICA_REGION_1 in region_names + assert REPLICA_REGION_2 in region_names + for replica in replicas: + assert replica["ReplicaStatus"] == "ACTIVE" + + def test_add_replica(self, table_with_replicas): + (ref, res) = table_with_replicas + + table_name = res["spec"]["tableName"] + + # Check DynamoDB Table exists + assert self.table_exists(table_name) + + # Get CR latest revision + cr = k8s.wait_resource_consumed_by_controller(ref) + + # Remove both initial replicas and add three new ones + cr["spec"]["replicationGroup"] = [ + {"regionName": REPLICA_REGION_3}, + {"regionName": REPLICA_REGION_4}, + {"regionName": REPLICA_REGION_5} + ] + + # Patch k8s resource + k8s.patch_custom_resource(ref, cr) + + # Wait for the replicas to be updated + table.wait_until( + table_name, + table.replicas_match( + [REPLICA_REGION_3, REPLICA_REGION_4, REPLICA_REGION_5]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Wait for all new replicas to be active + for region in [REPLICA_REGION_3, REPLICA_REGION_4, REPLICA_REGION_5]: + table.wait_until( + table_name, + table.replica_status_matches(region, "ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Verify replicas + replicas = table.get_replicas(table_name) + assert replicas is not None + assert len(replicas) == 3 + + region_names = [r["RegionName"] for r in replicas] + for region in [REPLICA_REGION_3, REPLICA_REGION_4, REPLICA_REGION_5]: + assert region in region_names + + for replica in replicas: + assert replica["ReplicaStatus"] == "ACTIVE" + + def test_remove_replica(self, table_with_replicas): + (ref, res) = table_with_replicas + + table_name = 
res["spec"]["tableName"] + + # Check DynamoDB Table exists + assert self.table_exists(table_name) + + # Wait for the initial replicas to be created and be active + table.wait_until( + table_name, + table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + for region in [REPLICA_REGION_1, REPLICA_REGION_2]: + table.wait_until( + table_name, + table.replica_status_matches(region, "ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Get CR latest revision + cr = k8s.wait_resource_consumed_by_controller(ref) + current_replicas = table.get_replicas(table_name) + assert current_replicas is not None + assert len(current_replicas) >= 1 + + # Get the region names of the current replicas + current_regions = [r["RegionName"] for r in current_replicas] + logging.info(f"Current replicas: {current_regions}") + + # Keep all but the last replica + regions_to_keep = current_regions[:-1] + regions_to_remove = [current_regions[-1]] + + # Remove the last replica + cr["spec"]["replicationGroup"] = [ + {"regionName": region} for region in regions_to_keep + ] + + # Patch k8s resource + k8s.patch_custom_resource(ref, cr) + + # Wait for the replica to be removed + table.wait_until( + table_name, + table.replicas_match(regions_to_keep), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Verify remaining replicas + replicas = table.get_replicas(table_name) + assert replicas is not None + assert len(replicas) == len(regions_to_keep) + + # Check that removed region is gone and kept regions are present + region_names = [r["RegionName"] for r in replicas] + for region in regions_to_keep: + assert region in region_names + for region in regions_to_remove: + assert region not in region_names + + def test_delete_table_with_replicas(self, table_with_replicas): + (ref, res) = table_with_replicas + + table_name = res["spec"]["tableName"] + + # Check DynamoDB 
Table exists + assert self.table_exists(table_name) + + # Delete the k8s resource + k8s.delete_custom_resource(ref) + + max_wait_seconds = REPLICA_WAIT_AFTER_SECONDS + interval_seconds = 30 + start_time = time.time() + + while time.time() - start_time < max_wait_seconds: + if not self.table_exists(table_name): + break + time.sleep(interval_seconds) + + # Verify the table was deleted + assert not self.table_exists(table_name) + replicas = table.get_replicas(table_name) + assert replicas is None + + def test_terminal_condition_for_invalid_stream_specification(self, table_with_invalid_replicas): + (ref, res) = table_with_invalid_replicas + + table_name = res["spec"]["tableName"] + + # Check DynamoDB Table exists + assert self.table_exists(table_name) + + # Wait for the terminal condition to be set + max_wait_seconds = 120 + interval_seconds = 10 + start_time = time.time() + terminal_condition_set = False + + while time.time() - start_time < max_wait_seconds: + cr = k8s.get_resource(ref) + if cr and "status" in cr and "conditions" in cr["status"]: + for condition_obj in cr["status"]["conditions"]: + if condition_obj["type"] == "ACK.Terminal" and condition_obj["status"] == "True": + terminal_condition_set = True + # Verify the error message + assert "table must have DynamoDB Streams enabled with StreamViewType set to NEW_AND_OLD_IMAGES" in condition_obj[ + "message"] + break + + if terminal_condition_set: + break + + time.sleep(interval_seconds) + + assert terminal_condition_set, "Terminal condition was not set for invalid StreamSpecification" + + def test_simultaneous_updates(self, table_with_replicas): + (ref, res) = table_with_replicas + + table_name = res["spec"]["tableName"] + + # Check DynamoDB Table exists + assert self.table_exists(table_name) + + # Wait for the initial replicas to be created and be active + table.wait_until( + table_name, + table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + 
interval_seconds=30, + ) + + for region in [REPLICA_REGION_1, REPLICA_REGION_2]: + table.wait_until( + table_name, + table.replica_status_matches(region, "ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Get CR latest revision + cr = k8s.wait_resource_consumed_by_controller(ref) + + # Prepare simultaneous updates to multiple fields: + # 1. Update tags + # 2. Change replicas + # 3. Add GSI + + # Add attribute definitions needed for GSI + cr["spec"]["attributeDefinitions"] = [ + {"attributeName": "PK", "attributeType": "S"}, + {"attributeName": "SK", "attributeType": "S"}, + {"attributeName": "GSI1PK", "attributeType": "S"}, + {"attributeName": "GSI1SK", "attributeType": "S"} + ] + + # Add a GSI + cr["spec"]["globalSecondaryIndexes"] = [{ + "indexName": "GSI1", + "keySchema": [ + {"attributeName": "GSI1PK", "keyType": "HASH"}, + {"attributeName": "GSI1SK", "keyType": "RANGE"} + ], + "projection": { + "projectionType": "ALL" + } + }] + + # Update replicas - remove one and add a different one + # Set directly rather than depending on current replicas + cr["spec"]["replicationGroup"] = [ + {"regionName": REPLICA_REGION_1}, # Keep the first defined region + {"regionName": REPLICA_REGION_3} # Add a new region + ] + + # Update tags + cr["spec"]["tags"] = [ + {"key": "Environment", "value": "Test"}, + {"key": "Purpose", "value": "SimontemourTest"}, + {"key": "UpdatedAt", "value": time.strftime("%Y-%m-%d")} + ] + + # Patch k8s resource with all changes at once + k8s.patch_custom_resource(ref, cr) + + # Wait for GSI to be created (usually the slowest operation) + table.wait_until( + table_name, + table.gsi_matches([{ + "indexName": "GSI1", + "keySchema": [ + {"attributeName": "GSI1PK", "keyType": "HASH"}, + {"attributeName": "GSI1SK", "keyType": "RANGE"} + ], + "projection": { + "projectionType": "ALL" + } + }]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*3, + interval_seconds=30, + ) + + # Wait for replicas to be updated + 
expected_regions = [REPLICA_REGION_1, REPLICA_REGION_3] + table.wait_until( + table_name, + table.replicas_match(expected_regions), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*3, + interval_seconds=30, + ) + + # Verify all changes were applied + + # Check tags + table_tags = get_resource_tags( + cr["status"]["ackResourceMetadata"]["arn"]) + tags.assert_ack_system_tags(tags=table_tags) + + expected_tags = { + "Environment": "Test", + "Purpose": "SimontemourTest", + "UpdatedAt": time.strftime("%Y-%m-%d") + } + + # Verify custom tags (ignoring ACK system tags) + for key, value in expected_tags.items(): + assert key in table_tags + assert table_tags[key] == value + + # Verify GSI + table_info = table.get(table_name) + assert "GlobalSecondaryIndexes" in table_info + assert len(table_info["GlobalSecondaryIndexes"]) == 1 + assert table_info["GlobalSecondaryIndexes"][0]["IndexName"] == "GSI1" + + # Verify replicas + replicas = table.get_replicas(table_name) + assert replicas is not None + assert len(replicas) == 2 + region_names = [r["RegionName"] for r in replicas] + assert REPLICA_REGION_1 in region_names + assert REPLICA_REGION_3 in region_names + assert REPLICA_REGION_2 not in region_names + + # Verify all replicas are active + for replica in replicas: + assert replica["ReplicaStatus"] == "ACTIVE" From 534418ad5c7c919dbe289e1119820ef2db2efcd5 Mon Sep 17 00:00:00 2001 From: arush sharma Date: Fri, 21 Mar 2025 16:28:16 -0700 Subject: [PATCH 3/5] Rename to table replicas --- apis/v1alpha1/ack-generate-metadata.yaml | 6 +- apis/v1alpha1/generator.yaml | 2 +- apis/v1alpha1/table.go | 6 +- apis/v1alpha1/zz_generated.deepcopy.go | 22 +- .../dynamodb.services.k8s.aws_tables.yaml | 78 ++-- generator.yaml | 2 +- .../dynamodb.services.k8s.aws_tables.yaml | 78 ++-- pkg/resource/table/hooks.go | 14 +- pkg/resource/table/hooks_replica_updates.go | 135 ++++-- pkg/resource/table/sdk.go | 30 +- .../table/sdk_create_post_set_output.go.tpl | 8 - 
.../table/sdk_delete_pre_build_request.go.tpl | 12 +- .../table/sdk_read_one_post_set_output.go.tpl | 10 +- test/e2e/resources/table_with_replicas.yaml | 2 +- .../table_with_replicas_invalid.yaml | 2 +- test/e2e/tests/test_gsi_and_replicas.py | 405 ------------------ test/e2e/tests/test_table_replicas.py | 266 ++++++------ 17 files changed, 358 insertions(+), 720 deletions(-) delete mode 100644 test/e2e/tests/test_gsi_and_replicas.py diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index cd7b5bb..28231d7 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,13 +1,13 @@ ack_generate_info: - build_date: "2025-03-21T05:42:15Z" + build_date: "2025-03-23T21:30:39Z" build_hash: 3722729cebe6d3c03c7e442655ef0846f91566a2 go_version: go1.24.0 version: v0.43.2-7-g3722729 -api_directory_checksum: a3ce13b1988591541d32ac56e55b416ff3b6bfc2 +api_directory_checksum: cb49386ebd7bb50e2521072a76262c72b9dbd285 api_version: v1alpha1 aws_sdk_go_version: v1.32.6 generator_config_info: - file_checksum: c8721c6d81ac66a3ea6c2b96b784c3da48b798b5 + file_checksum: 4533fa8aca3b134b5895ad6ce9a093c3446d99da original_file_name: generator.yaml last_modification: reason: API generation diff --git a/apis/v1alpha1/generator.yaml b/apis/v1alpha1/generator.yaml index 4531bd4..4ed38b6 100644 --- a/apis/v1alpha1/generator.yaml +++ b/apis/v1alpha1/generator.yaml @@ -25,7 +25,7 @@ operations: resources: Table: fields: - ReplicationGroup: + TableReplicas: custom_field: list_of: CreateReplicationGroupMemberAction compare: diff --git a/apis/v1alpha1/table.go b/apis/v1alpha1/table.go index e04f49c..4865df0 100644 --- a/apis/v1alpha1/table.go +++ b/apis/v1alpha1/table.go @@ -141,8 +141,7 @@ type TableSpec struct { // For current minimum and maximum provisioned throughput values, see Service, // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon 
DynamoDB Developer Guide. - ProvisionedThroughput *ProvisionedThroughput `json:"provisionedThroughput,omitempty"` - ReplicationGroup []*CreateReplicationGroupMemberAction `json:"replicationGroup,omitempty"` + ProvisionedThroughput *ProvisionedThroughput `json:"provisionedThroughput,omitempty"` // Represents the settings used to enable server-side encryption. SSESpecification *SSESpecification `json:"sseSpecification,omitempty"` // The settings for DynamoDB Streams on the table. These settings consist of: @@ -164,7 +163,8 @@ type TableSpec struct { // The name of the table to create. You can also provide the Amazon Resource // Name (ARN) of the table in this parameter. // +kubebuilder:validation:Required - TableName *string `json:"tableName"` + TableName *string `json:"tableName"` + TableReplicas []*CreateReplicationGroupMemberAction `json:"tableReplicas,omitempty"` // A list of key-value pairs to label the table. For more information, see Tagging // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html). 
Tags []*Tag `json:"tags,omitempty"` diff --git a/apis/v1alpha1/zz_generated.deepcopy.go b/apis/v1alpha1/zz_generated.deepcopy.go index 27eecf7..46bd371 100644 --- a/apis/v1alpha1/zz_generated.deepcopy.go +++ b/apis/v1alpha1/zz_generated.deepcopy.go @@ -2739,17 +2739,6 @@ func (in *TableSpec) DeepCopyInto(out *TableSpec) { *out = new(ProvisionedThroughput) (*in).DeepCopyInto(*out) } - if in.ReplicationGroup != nil { - in, out := &in.ReplicationGroup, &out.ReplicationGroup - *out = make([]*CreateReplicationGroupMemberAction, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(CreateReplicationGroupMemberAction) - (*in).DeepCopyInto(*out) - } - } - } if in.SSESpecification != nil { in, out := &in.SSESpecification, &out.SSESpecification *out = new(SSESpecification) @@ -2770,6 +2759,17 @@ func (in *TableSpec) DeepCopyInto(out *TableSpec) { *out = new(string) **out = **in } + if in.TableReplicas != nil { + in, out := &in.TableReplicas, &out.TableReplicas + *out = make([]*CreateReplicationGroupMemberAction, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(CreateReplicationGroupMemberAction) + (*in).DeepCopyInto(*out) + } + } + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make([]*Tag, len(*in)) diff --git a/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml b/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml index a4e03bd..196263e 100644 --- a/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml +++ b/config/crd/bases/dynamodb.services.k8s.aws_tables.yaml @@ -322,45 +322,6 @@ spec: format: int64 type: integer type: object - replicationGroup: - items: - description: Represents a replica to be created. - properties: - globalSecondaryIndexes: - items: - description: Represents the properties of a replica global - secondary index. 
- properties: - indexName: - type: string - provisionedThroughputOverride: - description: |- - Replica-specific provisioned throughput settings. If not specified, uses - the source table's provisioned throughput settings. - properties: - readCapacityUnits: - format: int64 - type: integer - type: object - type: object - type: array - kmsMasterKeyID: - type: string - provisionedThroughputOverride: - description: |- - Replica-specific provisioned throughput settings. If not specified, uses - the source table's provisioned throughput settings. - properties: - readCapacityUnits: - format: int64 - type: integer - type: object - regionName: - type: string - tableClassOverride: - type: string - type: object - type: array sseSpecification: description: Represents the settings used to enable server-side encryption. properties: @@ -415,6 +376,45 @@ spec: The name of the table to create. You can also provide the Amazon Resource Name (ARN) of the table in this parameter. type: string + tableReplicas: + items: + description: Represents a replica to be created. + properties: + globalSecondaryIndexes: + items: + description: Represents the properties of a replica global + secondary index. + properties: + indexName: + type: string + provisionedThroughputOverride: + description: |- + Replica-specific provisioned throughput settings. If not specified, uses + the source table's provisioned throughput settings. + properties: + readCapacityUnits: + format: int64 + type: integer + type: object + type: object + type: array + kmsMasterKeyID: + type: string + provisionedThroughputOverride: + description: |- + Replica-specific provisioned throughput settings. If not specified, uses + the source table's provisioned throughput settings. + properties: + readCapacityUnits: + format: int64 + type: integer + type: object + regionName: + type: string + tableClassOverride: + type: string + type: object + type: array tags: description: |- A list of key-value pairs to label the table. 
For more information, see Tagging diff --git a/generator.yaml b/generator.yaml index 4531bd4..4ed38b6 100644 --- a/generator.yaml +++ b/generator.yaml @@ -25,7 +25,7 @@ operations: resources: Table: fields: - ReplicationGroup: + TableReplicas: custom_field: list_of: CreateReplicationGroupMemberAction compare: diff --git a/helm/crds/dynamodb.services.k8s.aws_tables.yaml b/helm/crds/dynamodb.services.k8s.aws_tables.yaml index a8a7fc6..feaa235 100644 --- a/helm/crds/dynamodb.services.k8s.aws_tables.yaml +++ b/helm/crds/dynamodb.services.k8s.aws_tables.yaml @@ -326,45 +326,6 @@ spec: format: int64 type: integer type: object - replicationGroup: - items: - description: Represents a replica to be created. - properties: - globalSecondaryIndexes: - items: - description: Represents the properties of a replica global - secondary index. - properties: - indexName: - type: string - provisionedThroughputOverride: - description: |- - Replica-specific provisioned throughput settings. If not specified, uses - the source table's provisioned throughput settings. - properties: - readCapacityUnits: - format: int64 - type: integer - type: object - type: object - type: array - kmsMasterKeyID: - type: string - provisionedThroughputOverride: - description: |- - Replica-specific provisioned throughput settings. If not specified, uses - the source table's provisioned throughput settings. - properties: - readCapacityUnits: - format: int64 - type: integer - type: object - regionName: - type: string - tableClassOverride: - type: string - type: object - type: array sseSpecification: description: Represents the settings used to enable server-side encryption. properties: @@ -419,6 +380,45 @@ spec: The name of the table to create. You can also provide the Amazon Resource Name (ARN) of the table in this parameter. type: string + tableReplicas: + items: + description: Represents a replica to be created. 
+ properties: + globalSecondaryIndexes: + items: + description: Represents the properties of a replica global + secondary index. + properties: + indexName: + type: string + provisionedThroughputOverride: + description: |- + Replica-specific provisioned throughput settings. If not specified, uses + the source table's provisioned throughput settings. + properties: + readCapacityUnits: + format: int64 + type: integer + type: object + type: object + type: array + kmsMasterKeyID: + type: string + provisionedThroughputOverride: + description: |- + Replica-specific provisioned throughput settings. If not specified, uses + the source table's provisioned throughput settings. + properties: + readCapacityUnits: + format: int64 + type: integer + type: object + regionName: + type: string + tableClassOverride: + type: string + type: object + type: array tags: description: |- A list of key-value pairs to label the table. For more information, see Tagging diff --git a/pkg/resource/table/hooks.go b/pkg/resource/table/hooks.go index 2df3aac..bb094c9 100644 --- a/pkg/resource/table/hooks.go +++ b/pkg/resource/table/hooks.go @@ -233,7 +233,7 @@ func (rm *resourceManager) customUpdateTable( } return nil, err } - case delta.DifferentAt("Spec.ReplicationGroup"): + case delta.DifferentAt("Spec.TableReplicas"): if !hasStreamSpecificationWithNewAndOldImages(desired) { msg := "table must have DynamoDB Streams enabled with StreamViewType set to NEW_AND_OLD_IMAGES for replica updates" rlog.Debug(msg) @@ -243,7 +243,7 @@ func (rm *resourceManager) customUpdateTable( if !canUpdateTableReplicas(latest) { return nil, requeueWaitReplicasActive } - if err := rm.syncReplicaUpdates(ctx, latest, desired); err != nil { + if err := rm.syncReplicas(ctx, latest, desired); err != nil { return nil, err } } @@ -581,11 +581,11 @@ func customPreCompare( } // Handle ReplicaUpdates API comparison - if len(a.ko.Spec.ReplicationGroup) != len(b.ko.Spec.ReplicationGroup) { - delta.Add("Spec.ReplicationGroup", 
a.ko.Spec.ReplicationGroup, b.ko.Spec.ReplicationGroup) - } else if a.ko.Spec.ReplicationGroup != nil && b.ko.Spec.ReplicationGroup != nil { - if !equalReplicaArrays(a.ko.Spec.ReplicationGroup, b.ko.Spec.ReplicationGroup) { - delta.Add("Spec.ReplicationGroup", a.ko.Spec.ReplicationGroup, b.ko.Spec.ReplicationGroup) + if len(a.ko.Spec.TableReplicas) != len(b.ko.Spec.TableReplicas) { + delta.Add("Spec.TableReplicas", a.ko.Spec.TableReplicas, b.ko.Spec.TableReplicas) + } else if a.ko.Spec.TableReplicas != nil && b.ko.Spec.TableReplicas != nil { + if !equalReplicaArrays(a.ko.Spec.TableReplicas, b.ko.Spec.TableReplicas) { + delta.Add("Spec.TableReplicas", a.ko.Spec.TableReplicas, b.ko.Spec.TableReplicas) } } diff --git a/pkg/resource/table/hooks_replica_updates.go b/pkg/resource/table/hooks_replica_updates.go index 1357dc5..18c616e 100644 --- a/pkg/resource/table/hooks_replica_updates.go +++ b/pkg/resource/table/hooks_replica_updates.go @@ -15,8 +15,12 @@ package table import ( "context" + "strings" + "time" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" + ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" + ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" "github.com/aws/aws-sdk-go-v2/aws" svcsdk "github.com/aws/aws-sdk-go-v2/service/dynamodb" svcsdktypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" @@ -195,46 +199,58 @@ func createReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) s func updateReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) svcsdktypes.ReplicationGroupUpdate { replicaUpdate := svcsdktypes.ReplicationGroupUpdate{} updateAction := &svcsdktypes.UpdateReplicationGroupMemberAction{} + isValidUpdate := false // updates to gsi without ProvisionedThroughputOverride are invalid if replica.RegionName != nil { updateAction.RegionName = aws.String(*replica.RegionName) + // RegionName is required but doesn't count as a update } if replica.KMSMasterKeyID != nil { 
updateAction.KMSMasterKeyId = aws.String(*replica.KMSMasterKeyID) + isValidUpdate = true } if replica.TableClassOverride != nil { updateAction.TableClassOverride = svcsdktypes.TableClass(*replica.TableClassOverride) + isValidUpdate = true } - if replica.ProvisionedThroughputOverride != nil { - updateAction.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{} - if replica.ProvisionedThroughputOverride.ReadCapacityUnits != nil { - updateAction.ProvisionedThroughputOverride.ReadCapacityUnits = replica.ProvisionedThroughputOverride.ReadCapacityUnits + if replica.ProvisionedThroughputOverride != nil && + replica.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + updateAction.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{ + ReadCapacityUnits: replica.ProvisionedThroughputOverride.ReadCapacityUnits, + } + isValidUpdate = true + } + + // Only include GSIs that have provisioned throughput overrides + var gsisWithOverrides []svcsdktypes.ReplicaGlobalSecondaryIndex + for _, gsi := range replica.GlobalSecondaryIndexes { + if gsi.IndexName != nil && gsi.ProvisionedThroughputOverride != nil && + gsi.ProvisionedThroughputOverride.ReadCapacityUnits != nil { + gsisWithOverrides = append(gsisWithOverrides, svcsdktypes.ReplicaGlobalSecondaryIndex{ + IndexName: aws.String(*gsi.IndexName), + ProvisionedThroughputOverride: &svcsdktypes.ProvisionedThroughputOverride{ + ReadCapacityUnits: gsi.ProvisionedThroughputOverride.ReadCapacityUnits, + }, + }) + isValidUpdate = true } } - if replica.GlobalSecondaryIndexes != nil { - gsiList := []svcsdktypes.ReplicaGlobalSecondaryIndex{} - for _, gsi := range replica.GlobalSecondaryIndexes { - replicaGSI := svcsdktypes.ReplicaGlobalSecondaryIndex{} - if gsi.IndexName != nil { - replicaGSI.IndexName = aws.String(*gsi.IndexName) - } - if gsi.ProvisionedThroughputOverride != nil { - replicaGSI.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{} - if 
gsi.ProvisionedThroughputOverride.ReadCapacityUnits != nil { - replicaGSI.ProvisionedThroughputOverride.ReadCapacityUnits = gsi.ProvisionedThroughputOverride.ReadCapacityUnits - } - } - gsiList = append(gsiList, replicaGSI) - } - updateAction.GlobalSecondaryIndexes = gsiList + // Only set GlobalSecondaryIndexes if we have GSIs with throughput overrides + if len(gsisWithOverrides) > 0 { + updateAction.GlobalSecondaryIndexes = gsisWithOverrides } - replicaUpdate.Update = updateAction - return replicaUpdate + if isValidUpdate { + replicaUpdate.Update = updateAction + return replicaUpdate + } + + // If no valid updates, return an empty ReplicationGroupUpdate + return svcsdktypes.ReplicationGroupUpdate{} } // deleteReplicaUpdate creates a ReplicationGroupUpdate for deleting an existing replica @@ -249,6 +265,11 @@ func deleteReplicaUpdate(regionName string) svcsdktypes.ReplicationGroupUpdate { // canUpdateTableReplicas returns true if it's possible to update table replicas. // We can only modify replicas when they are in ACTIVE state. 
func canUpdateTableReplicas(r *resource) bool { + // Check if Status or Replicas is nil + // needed when called by sdkdelete + if r == nil || r.ko == nil || r.ko.Status.Replicas == nil { + return true // If no replicas exist, we can proceed with updates + } // Check if any replica is not in ACTIVE state for _, replicaDesc := range r.ko.Status.Replicas { if replicaDesc.RegionName != nil && replicaDesc.ReplicaStatus != nil { @@ -272,14 +293,14 @@ func hasStreamSpecificationWithNewAndOldImages(r *resource) bool { return StreamEnabled && StreamViewType } -// syncReplicaUpdates updates the replica configuration for a table -func (rm *resourceManager) syncReplicaUpdates( +// syncReplicas updates the replica configuration for a table +func (rm *resourceManager) syncReplicas( ctx context.Context, latest *resource, desired *resource, ) (err error) { rlog := ackrtlog.FromContext(ctx) - exit := rlog.Trace("rm.syncReplicaUpdates") + exit := rlog.Trace("rm.syncReplicas") defer func() { exit(err) }() @@ -293,7 +314,33 @@ func (rm *resourceManager) syncReplicaUpdates( _, err = rm.sdkapi.UpdateTable(ctx, input) rm.metrics.RecordAPICall("UPDATE", "UpdateTable", err) if err != nil { - return err + // Handle specific errors + if awsErr, ok := ackerr.AWSError(err); ok { + // Handle ValidationException - when replicas are not part of the global table + if awsErr.ErrorCode() == "ValidationException" && + strings.Contains(awsErr.ErrorMessage(), "not part of the global table") { + // A replica was already deleted + rlog.Debug("replica already deleted from global table", + "table", *latest.ko.Spec.TableName, + "error", awsErr.ErrorMessage()) + return ackrequeue.NeededAfter( + ErrTableUpdating, + 30*time.Second, + ) + } + + // Handle ResourceInUseException - when the table is being updated + if awsErr.ErrorCode() == "ResourceInUseException" { + rlog.Debug("table is currently in use, will retry", + "table", *latest.ko.Spec.TableName, + "error", awsErr.ErrorMessage()) + return 
ackrequeue.NeededAfter( + ErrTableUpdating, + 30*time.Second, + ) + } + return err + } } // If there are more replicas to process, requeue @@ -334,22 +381,22 @@ func (rm *resourceManager) newUpdateTableReplicaUpdatesOneAtATimePayload( // We'll only perform one replica action at a time if len(createReplicas) > 0 { - region := *createReplicas[0].RegionName - rlog.Debug("creating replica in region", "table", *desired.ko.Spec.TableName, "region", region) + replica := *createReplicas[0] + rlog.Debug("creating replica in region", "table", *desired.ko.Spec.TableName, "region", *replica.RegionName) input.ReplicaUpdates = append(input.ReplicaUpdates, createReplicaUpdate(createReplicas[0])) return input, replicasInQueue, nil } if len(updateReplicas) > 0 { - region := *updateReplicas[0].RegionName - rlog.Debug("updating replica in region", "table", *desired.ko.Spec.TableName, "region", region) + replica := *updateReplicas[0] + rlog.Debug("updating replica in region", "table", *desired.ko.Spec.TableName, "region", *replica.RegionName) input.ReplicaUpdates = append(input.ReplicaUpdates, updateReplicaUpdate(updateReplicas[0])) return input, replicasInQueue, nil } if len(deleteRegions) > 0 { - region := deleteRegions[0] - rlog.Debug("deleting replica in region", "table", *desired.ko.Spec.TableName, "region", region) + replica := deleteRegions[0] + rlog.Debug("deleting replica in region", "table", *desired.ko.Spec.TableName, "region", replica) input.ReplicaUpdates = append(input.ReplicaUpdates, deleteReplicaUpdate(deleteRegions[0])) return input, replicasInQueue, nil } @@ -367,27 +414,27 @@ func calculateReplicaUpdates( updateReplicas []*v1alpha1.CreateReplicationGroupMemberAction, deleteRegions []string, ) { - existingRegions := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) - if latest != nil && latest.ko.Spec.ReplicationGroup != nil { - for _, replica := range latest.ko.Spec.ReplicationGroup { + latestReplicas := 
make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) + if latest.ko.Spec.TableReplicas != nil { + for _, replica := range latest.ko.Spec.TableReplicas { if replica.RegionName != nil { - existingRegions[*replica.RegionName] = replica + latestReplicas[*replica.RegionName] = replica } } } - desiredRegions := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) - if desired != nil && desired.ko.Spec.ReplicationGroup != nil { - for _, replica := range desired.ko.Spec.ReplicationGroup { + desiredReplicas := make(map[string]*v1alpha1.CreateReplicationGroupMemberAction) + if desired != nil && desired.ko.Spec.TableReplicas != nil { + for _, replica := range desired.ko.Spec.TableReplicas { if replica.RegionName != nil { - desiredRegions[*replica.RegionName] = replica + desiredReplicas[*replica.RegionName] = replica } } } // Calculate replicas to create or update - for regionName, desiredReplica := range desiredRegions { - existingReplica, exists := existingRegions[regionName] + for desiredRegion, desiredReplica := range desiredReplicas { + existingReplica, exists := latestReplicas[desiredRegion] if !exists { createReplicas = append(createReplicas, desiredReplica) } else if !equalCreateReplicationGroupMemberActions(existingReplica, desiredReplica) { @@ -396,8 +443,8 @@ func calculateReplicaUpdates( } // Calculate regions to delete - for regionName := range existingRegions { - if _, exists := desiredRegions[regionName]; !exists { + for regionName := range latestReplicas { + if _, exists := desiredReplicas[regionName]; !exists { deleteRegions = append(deleteRegions, regionName) } } diff --git a/pkg/resource/table/sdk.go b/pkg/resource/table/sdk.go index 9a9d639..d93c081 100644 --- a/pkg/resource/table/sdk.go +++ b/pkg/resource/table/sdk.go @@ -440,8 +440,8 @@ func (rm *resourceManager) sdkFind( } else { ko.Spec.BillingMode = aws.String("PROVISIONED") } - if resp.Table.Replicas != nil { - replicationGroup := []*svcapitypes.CreateReplicationGroupMemberAction{} + 
if len(resp.Table.Replicas) > 0 { + tableReplicas := []*svcapitypes.CreateReplicationGroupMemberAction{} for _, replica := range resp.Table.Replicas { replicaElem := &svcapitypes.CreateReplicationGroupMemberAction{} if replica.RegionName != nil { @@ -473,11 +473,11 @@ func (rm *resourceManager) sdkFind( if replica.ReplicaTableClassSummary != nil && replica.ReplicaTableClassSummary.TableClass != "" { replicaElem.TableClassOverride = aws.String(string(replica.ReplicaTableClassSummary.TableClass)) } - replicationGroup = append(replicationGroup, replicaElem) + tableReplicas = append(tableReplicas, replicaElem) } - ko.Spec.ReplicationGroup = replicationGroup + ko.Spec.TableReplicas = tableReplicas } else { - ko.Spec.ReplicationGroup = nil + ko.Spec.TableReplicas = nil } if isTableCreating(&resource{ko}) { return &resource{ko}, requeueWaitWhileCreating @@ -846,14 +846,6 @@ func (rm *resourceManager) sdkCreate( return nil, err } } - // Check if replicas were specified during creation - if len(desired.ko.Spec.ReplicationGroup) > 0 { - ko.Spec.ReplicationGroup = desired.ko.Spec.ReplicationGroup - - // Return with a requeue to process replica updates - // This will trigger the reconciliation loop again, which will call syncReplicaUpdates - return &resource{ko}, requeueWaitWhileUpdating - } return &resource{ko}, nil } @@ -1065,16 +1057,22 @@ func (rm *resourceManager) sdkDelete( } // If there are replicas, we need to remove them before deleting the table - if len(r.ko.Spec.ReplicationGroup) > 0 { + if !canUpdateTableReplicas(latest) { + return nil, requeueWaitReplicasActive + } + if len(r.ko.Spec.TableReplicas) > 0 { desired := &resource{ ko: r.ko.DeepCopy(), } - desired.ko.Spec.ReplicationGroup = nil + desired.ko.Spec.TableReplicas = nil - err := rm.syncReplicaUpdates(ctx, r, desired) + err := rm.syncReplicas(ctx, r, desired) if err != nil { return nil, err } + // Requeue to wait for replica removal to complete before attempting table deletion + // When syncReplicas returns 
an error other than requeue + return r, requeueWaitWhileDeleting } input, err := rm.newDeleteRequestPayload(r) diff --git a/templates/hooks/table/sdk_create_post_set_output.go.tpl b/templates/hooks/table/sdk_create_post_set_output.go.tpl index 44d80d4..49d67a2 100644 --- a/templates/hooks/table/sdk_create_post_set_output.go.tpl +++ b/templates/hooks/table/sdk_create_post_set_output.go.tpl @@ -2,12 +2,4 @@ if err := rm.syncTTL(ctx, desired, &resource{ko}); err != nil { return nil, err } - } - // Check if replicas were specified during creation - if len(desired.ko.Spec.ReplicationGroup) > 0 { - ko.Spec.ReplicationGroup = desired.ko.Spec.ReplicationGroup - - // Return with a requeue to process replica updates - // This will trigger the reconciliation loop again, which will call syncReplicaUpdates - return &resource{ko}, requeueWaitWhileUpdating } \ No newline at end of file diff --git a/templates/hooks/table/sdk_delete_pre_build_request.go.tpl b/templates/hooks/table/sdk_delete_pre_build_request.go.tpl index 06d976b..6d67ae9 100644 --- a/templates/hooks/table/sdk_delete_pre_build_request.go.tpl +++ b/templates/hooks/table/sdk_delete_pre_build_request.go.tpl @@ -6,14 +6,20 @@ } // If there are replicas, we need to remove them before deleting the table - if len(r.ko.Spec.ReplicationGroup) > 0 { + if !canUpdateTableReplicas(latest) { + return nil, requeueWaitReplicasActive + } + if len(r.ko.Spec.TableReplicas) > 0 { desired := &resource{ ko: r.ko.DeepCopy(), } - desired.ko.Spec.ReplicationGroup = nil + desired.ko.Spec.TableReplicas = nil - err := rm.syncReplicaUpdates(ctx, r, desired) + err := rm.syncReplicas(ctx, r, desired) if err != nil { return nil, err } + // Requeue to wait for replica removal to complete before attempting table deletion + // When syncReplicas returns an error other than requeue + return r, requeueWaitWhileDeleting } diff --git a/templates/hooks/table/sdk_read_one_post_set_output.go.tpl b/templates/hooks/table/sdk_read_one_post_set_output.go.tpl 
index e629f72..f983d5c 100644 --- a/templates/hooks/table/sdk_read_one_post_set_output.go.tpl +++ b/templates/hooks/table/sdk_read_one_post_set_output.go.tpl @@ -53,8 +53,8 @@ } else { ko.Spec.BillingMode = aws.String("PROVISIONED") } - if resp.Table.Replicas != nil { - replicationGroup := []*svcapitypes.CreateReplicationGroupMemberAction{} + if len(resp.Table.Replicas) > 0 { + tableReplicas := []*svcapitypes.CreateReplicationGroupMemberAction{} for _, replica := range resp.Table.Replicas { replicaElem := &svcapitypes.CreateReplicationGroupMemberAction{} if replica.RegionName != nil { @@ -86,11 +86,11 @@ if replica.ReplicaTableClassSummary != nil && replica.ReplicaTableClassSummary.TableClass != "" { replicaElem.TableClassOverride = aws.String(string(replica.ReplicaTableClassSummary.TableClass)) } - replicationGroup = append(replicationGroup, replicaElem) + tableReplicas = append(tableReplicas, replicaElem) } - ko.Spec.ReplicationGroup = replicationGroup + ko.Spec.TableReplicas = tableReplicas } else { - ko.Spec.ReplicationGroup = nil + ko.Spec.TableReplicas = nil } if isTableCreating(&resource{ko}) { return &resource{ko}, requeueWaitWhileCreating diff --git a/test/e2e/resources/table_with_replicas.yaml b/test/e2e/resources/table_with_replicas.yaml index 08da660..dbca03f 100644 --- a/test/e2e/resources/table_with_replicas.yaml +++ b/test/e2e/resources/table_with_replicas.yaml @@ -19,6 +19,6 @@ spec: streamSpecification: streamEnabled: true streamViewType: "NEW_AND_OLD_IMAGES" - replicationGroup: + tableReplicas: - regionName: $REPLICA_REGION_1 - regionName: $REPLICA_REGION_2 diff --git a/test/e2e/resources/table_with_replicas_invalid.yaml b/test/e2e/resources/table_with_replicas_invalid.yaml index a75eff7..bd4d870 100644 --- a/test/e2e/resources/table_with_replicas_invalid.yaml +++ b/test/e2e/resources/table_with_replicas_invalid.yaml @@ -16,5 +16,5 @@ spec: - attributeName: SK keyType: RANGE billingMode: PAY_PER_REQUEST - replicationGroup: + tableReplicas: - 
regionName: $REPLICA_REGION_1 diff --git a/test/e2e/tests/test_gsi_and_replicas.py b/test/e2e/tests/test_gsi_and_replicas.py deleted file mode 100644 index f85ae42..0000000 --- a/test/e2e/tests/test_gsi_and_replicas.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may -# not use this file except in compliance with the License. A copy of the -# License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. - -"""Integration tests for DynamoDB Tables with both GSIs and Replicas. -""" - -import logging -import time -from typing import Dict, Tuple - -import boto3 -import pytest -from acktest import tags -from acktest.k8s import resource as k8s -from acktest.resources import random_suffix_name -from e2e import (CRD_GROUP, CRD_VERSION, condition, get_resource_tags, - load_dynamodb_resource, service_marker, table, - wait_for_cr_status) -from e2e.replacement_values import REPLACEMENT_VALUES - -RESOURCE_PLURAL = "tables" - -DELETE_WAIT_AFTER_SECONDS = 15 -MODIFY_WAIT_AFTER_SECONDS = 90 -REPLICA_WAIT_AFTER_SECONDS = 300 # Replicas can take longer to create/update - -REPLICA_REGION_1 = "us-east-1" -REPLICA_REGION_2 = "eu-west-1" -REPLICA_REGION_3 = "eu-central-1" - - -def create_table_with_gsi_and_replicas(name: str): - replacements = REPLACEMENT_VALUES.copy() - replacements["TABLE_NAME"] = name - replacements["REPLICA_REGION_1"] = REPLICA_REGION_1 - replacements["REPLICA_REGION_2"] = REPLICA_REGION_2 - - resource_data = load_dynamodb_resource( - "table_with_gsi_and_replicas", - additional_replacements=replacements, - ) - logging.debug(resource_data) - - # Create the k8s 
resource - ref = k8s.CustomResourceReference( - CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, - name, namespace="default", - ) - k8s.create_custom_resource(ref, resource_data) - cr = k8s.wait_resource_consumed_by_controller(ref) - - assert cr is not None - assert k8s.get_resource_exists(ref) - - return (ref, cr) - - -@pytest.fixture(scope="function") -def table_with_gsi_and_replicas(): - table_name = random_suffix_name("table-gsi-replicas", 32) - - (ref, res) = create_table_with_gsi_and_replicas(table_name) - - yield (ref, res) - - # Delete the k8s resource if it still exists - if k8s.get_resource_exists(ref): - k8s.delete_custom_resource(ref) - time.sleep(DELETE_WAIT_AFTER_SECONDS) - - -@service_marker -@pytest.mark.canary -class TestTableGSIAndReplicas: - def table_exists(self, table_name: str) -> bool: - return table.get(table_name) is not None - - def test_gsi_and_replicas_lifecycle(self, table_with_gsi_and_replicas): - """Test full lifecycle of a table with both GSIs and replicas, including valid and invalid update scenarios: - 1. Create table with 2 GSIs and 2 replicas - 2. Verify GSIs and replicas are created correctly - 3. Test valid update: Update GSIs and replicas simultaneously - 4. Test invalid update: Try to add GSI only to parent but not replicas - 5. Remove all GSIs and replicas - 6. 
Delete table - """ - (ref, res) = table_with_gsi_and_replicas - table_name = res["spec"]["tableName"] - - # Check DynamoDB Table exists - assert self.table_exists(table_name) - - # Wait for the table to be active - table.wait_until( - table_name, - table.status_matches("ACTIVE"), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - interval_seconds=15, - ) - - # Step 1: Verify initial GSIs were created - table.wait_until( - table_name, - table.gsi_matches([ - { - "indexName": "GSI1", - "keySchema": [ - {"attributeName": "GSI1PK", "keyType": "HASH"}, - {"attributeName": "GSI1SK", "keyType": "RANGE"} - ], - "projection": { - "projectionType": "ALL" - } - }, - { - "indexName": "GSI2", - "keySchema": [ - {"attributeName": "GSI2PK", "keyType": "HASH"} - ], - "projection": { - "projectionType": "KEYS_ONLY" - } - } - ]), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - interval_seconds=15, - ) - - # Wait for both initial replicas to be created - table.wait_until( - table_name, - table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - interval_seconds=15, - ) - - # Verify both GSIs and replicas are in correct state - logging.info("Verifying initial GSIs and replicas...") - table_info = table.get(table_name) - assert "GlobalSecondaryIndexes" in table_info - assert len(table_info["GlobalSecondaryIndexes"]) == 2 - - replicas = table.get_replicas(table_name) - assert replicas is not None - assert len(replicas) == 2 - region_names = [r["RegionName"] for r in replicas] - assert REPLICA_REGION_1 in region_names - assert REPLICA_REGION_2 in region_names - - # Step 2: Valid update - Update GSIs and replicas simultaneously - logging.info( - "Performing valid update: Updating GSIs and replicas simultaneously...") - cr = k8s.wait_resource_consumed_by_controller(ref) - - # Update attribute definitions to include a new GSI3 key - cr["spec"]["attributeDefinitions"] = [ - {"attributeName": "PK", "attributeType": "S"}, - {"attributeName": "SK", 
"attributeType": "S"}, - {"attributeName": "GSI1PK", "attributeType": "S"}, - {"attributeName": "GSI1SK", "attributeType": "S"}, - {"attributeName": "GSI3PK", "attributeType": "S"}, - ] - - # Remove GSI2, keep GSI1, add GSI3 - cr["spec"]["globalSecondaryIndexes"] = [ - { - "indexName": "GSI1", - "keySchema": [ - {"attributeName": "GSI1PK", "keyType": "HASH"}, - {"attributeName": "GSI1SK", "keyType": "RANGE"} - ], - "projection": { - "projectionType": "ALL" - } - }, - { - "indexName": "GSI3", - "keySchema": [ - {"attributeName": "GSI3PK", "keyType": "HASH"} - ], - "projection": { - "projectionType": "ALL" - } - } - ] - - # Remove REPLICA_REGION_2, keep REPLICA_REGION_1, add REPLICA_REGION_3 - cr["spec"]["replicationGroup"] = [ - {"regionName": REPLICA_REGION_1}, - {"regionName": REPLICA_REGION_3} - ] - - # Patch k8s resource with all changes at once - k8s.patch_custom_resource(ref, cr) - - # Wait for GSIs to be updated - table.wait_until( - table_name, - table.gsi_matches([ - { - "indexName": "GSI1", - "keySchema": [ - {"attributeName": "GSI1PK", "keyType": "HASH"}, - {"attributeName": "GSI1SK", "keyType": "RANGE"} - ], - "projection": { - "projectionType": "ALL" - } - }, - { - "indexName": "GSI3", - "keySchema": [ - {"attributeName": "GSI3PK", "keyType": "HASH"} - ], - "projection": { - "projectionType": "ALL" - } - } - ]), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*2, - interval_seconds=15, - ) - - # Wait for replicas to be updated - table.wait_until( - table_name, - table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_3]), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*2, - interval_seconds=15, - ) - - # Verify updates were applied correctly - table_info = table.get(table_name) - gsi_names = [gsi["IndexName"] - for gsi in table_info["GlobalSecondaryIndexes"]] - assert "GSI1" in gsi_names - assert "GSI3" in gsi_names - assert "GSI2" not in gsi_names - - replicas = table.get_replicas(table_name) - region_names = [r["RegionName"] for r in replicas] - assert 
REPLICA_REGION_1 in region_names - assert REPLICA_REGION_3 in region_names - assert REPLICA_REGION_2 not in region_names - - # Step 3: Test invalid update - try to add GSI only to parent table - logging.info( - "Testing invalid update: Adding GSI only to parent table but not replicas...") - cr = k8s.wait_resource_consumed_by_controller(ref) - - # Update attribute definitions to include a GSI that would only apply to parent - cr["spec"]["attributeDefinitions"] = [ - {"attributeName": "PK", "attributeType": "S"}, - {"attributeName": "SK", "attributeType": "S"}, - {"attributeName": "GSI1PK", "attributeType": "S"}, - {"attributeName": "GSI1SK", "attributeType": "S"}, - {"attributeName": "GSI3PK", "attributeType": "S"}, - {"attributeName": "GSI4PK", "attributeType": "S"}, - ] - - # Add a new GSI4 and attempt to define it for parent table but not replicas - cr["spec"]["globalSecondaryIndexes"] = [ - { - "indexName": "GSI1", - "keySchema": [ - {"attributeName": "GSI1PK", "keyType": "HASH"}, - {"attributeName": "GSI1SK", "keyType": "RANGE"} - ], - "projection": { - "projectionType": "ALL" - } - }, - { - "indexName": "GSI3", - "keySchema": [ - {"attributeName": "GSI3PK", "keyType": "HASH"} - ], - "projection": { - "projectionType": "ALL" - } - }, - { - "indexName": "GSI4", - "keySchema": [ - {"attributeName": "GSI4PK", "keyType": "HASH"} - ], - "projection": { - "projectionType": "ALL" - }, - "replicaDefinitions": [ - { - "regionName": REPLICA_REGION_1, - "propagateToReplica": False # Not including this in replica - } - ] - } - ] - - # Patch k8s resource with invalid configuration - k8s.patch_custom_resource(ref, cr) - - # Wait for terminal condition to be set or timeout - max_wait_seconds = 60 - interval_seconds = 5 - start_time = time.time() - terminal_condition_set = False - error_message_contains = "GSI must be created on all replicas" - - while time.time() - start_time < max_wait_seconds: - cr = k8s.get_resource(ref) - if cr and "status" in cr and "conditions" in 
cr["status"]: - for condition_obj in cr["status"]["conditions"]: - if condition_obj["type"] == "ACK.Terminal" and condition_obj["status"] == "True": - if error_message_contains in condition_obj["message"]: - terminal_condition_set = True - logging.info( - f"Found terminal condition: {condition_obj['message']}") - break - - if terminal_condition_set: - break - time.sleep(interval_seconds) - - # Assert that we received a terminal condition for the invalid update - assert terminal_condition_set, "Terminal condition was not set for invalid GSI configuration" - - # Get the latest resource state after invalid update failure - cr = k8s.wait_resource_consumed_by_controller(ref) - - # Step 4: Remove all GSIs and replicas except the minimum required - logging.info("Removing all GSIs and all but one replica...") - - # Update attribute definitions - keep only required for table - cr["spec"]["attributeDefinitions"] = [ - {"attributeName": "PK", "attributeType": "S"}, - {"attributeName": "SK", "attributeType": "S"}, - ] - - # Remove all GSIs - if "globalSecondaryIndexes" in cr["spec"]: - cr["spec"].pop("globalSecondaryIndexes") - - # Keep only the primary region replica - cr["spec"]["replicationGroup"] = [ - {"regionName": REPLICA_REGION_1} - ] - - # Patch k8s resource with all changes at once - k8s.patch_custom_resource(ref, cr) - - # Wait for all GSIs to be deleted - max_wait_seconds = REPLICA_WAIT_AFTER_SECONDS*2 - interval_seconds = 15 - start_time = time.time() - - while time.time() - start_time < max_wait_seconds: - table_info = table.get(table_name) - if "GlobalSecondaryIndexes" not in table_info: - logging.info("All GSIs have been deleted") - break - logging.info( - f"Waiting for GSIs to be deleted, remaining: {len(table_info.get('GlobalSecondaryIndexes', []))}") - time.sleep(interval_seconds) - - # Verify GSIs are deleted - table_info = table.get(table_name) - assert "GlobalSecondaryIndexes" not in table_info, "All GSIs should be deleted" - - # Wait for replicas to be 
updated - table.wait_until( - table_name, - table.replicas_match([REPLICA_REGION_1]), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*2, - interval_seconds=15, - ) - - # Step 5: Delete the table - logging.info("Deleting the table...") - k8s.delete_custom_resource(ref) - - # Wait for the table to be deleted - max_wait_seconds = REPLICA_WAIT_AFTER_SECONDS*2 - interval_seconds = 15 - start_time = time.time() - - while time.time() - start_time < max_wait_seconds: - if not self.table_exists(table_name): - break - logging.info("Waiting for table to be deleted...") - time.sleep(interval_seconds) - - # Verify table and all resources are deleted - assert not self.table_exists(table_name) - replicas = table.get_replicas(table_name) - assert replicas is None diff --git a/test/e2e/tests/test_table_replicas.py b/test/e2e/tests/test_table_replicas.py index 10b091e..2995293 100644 --- a/test/e2e/tests/test_table_replicas.py +++ b/test/e2e/tests/test_table_replicas.py @@ -188,7 +188,7 @@ def test_add_replica(self, table_with_replicas): cr = k8s.wait_resource_consumed_by_controller(ref) # Remove both initial replicas and add three new ones - cr["spec"]["replicationGroup"] = [ + cr["spec"]["tableReplicas"] = [ {"regionName": REPLICA_REGION_3}, {"regionName": REPLICA_REGION_4}, {"regionName": REPLICA_REGION_5} @@ -266,7 +266,7 @@ def test_remove_replica(self, table_with_replicas): regions_to_remove = [current_regions[-1]] # Remove the last replica - cr["spec"]["replicationGroup"] = [ + cr["spec"]["tableReplicas"] = [ {"regionName": region} for region in regions_to_keep ] @@ -350,134 +350,134 @@ def test_terminal_condition_for_invalid_stream_specification(self, table_with_in assert terminal_condition_set, "Terminal condition was not set for invalid StreamSpecification" - def test_simultaneous_updates(self, table_with_replicas): - (ref, res) = table_with_replicas - - table_name = res["spec"]["tableName"] - - # Check DynamoDB Table exists - assert self.table_exists(table_name) - - # Wait 
for the initial replicas to be created and be active - table.wait_until( - table_name, - table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - interval_seconds=30, - ) - - for region in [REPLICA_REGION_1, REPLICA_REGION_2]: - table.wait_until( - table_name, - table.replica_status_matches(region, "ACTIVE"), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - interval_seconds=30, - ) - - # Get CR latest revision - cr = k8s.wait_resource_consumed_by_controller(ref) - - # Prepare simultaneous updates to multiple fields: - # 1. Update tags - # 2. Change replicas - # 3. Add GSI - - # Add attribute definitions needed for GSI - cr["spec"]["attributeDefinitions"] = [ - {"attributeName": "PK", "attributeType": "S"}, - {"attributeName": "SK", "attributeType": "S"}, - {"attributeName": "GSI1PK", "attributeType": "S"}, - {"attributeName": "GSI1SK", "attributeType": "S"} - ] - - # Add a GSI - cr["spec"]["globalSecondaryIndexes"] = [{ - "indexName": "GSI1", - "keySchema": [ - {"attributeName": "GSI1PK", "keyType": "HASH"}, - {"attributeName": "GSI1SK", "keyType": "RANGE"} - ], - "projection": { - "projectionType": "ALL" - } - }] - - # Update replicas - remove one and add a different one - # Set directly rather than depending on current replicas - cr["spec"]["replicationGroup"] = [ - {"regionName": REPLICA_REGION_1}, # Keep the first defined region - {"regionName": REPLICA_REGION_3} # Add a new region - ] - - # Update tags - cr["spec"]["tags"] = [ - {"key": "Environment", "value": "Test"}, - {"key": "Purpose", "value": "SimontemourTest"}, - {"key": "UpdatedAt", "value": time.strftime("%Y-%m-%d")} - ] - - # Patch k8s resource with all changes at once - k8s.patch_custom_resource(ref, cr) - - # Wait for GSI to be created (usually the slowest operation) - table.wait_until( - table_name, - table.gsi_matches([{ - "indexName": "GSI1", - "keySchema": [ - {"attributeName": "GSI1PK", "keyType": "HASH"}, - {"attributeName": "GSI1SK", 
"keyType": "RANGE"} - ], - "projection": { - "projectionType": "ALL" - } - }]), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*3, - interval_seconds=30, - ) - - # Wait for replicas to be updated - expected_regions = [REPLICA_REGION_1, REPLICA_REGION_3] - table.wait_until( - table_name, - table.replicas_match(expected_regions), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*3, - interval_seconds=30, - ) - - # Verify all changes were applied - - # Check tags - table_tags = get_resource_tags( - cr["status"]["ackResourceMetadata"]["arn"]) - tags.assert_ack_system_tags(tags=table_tags) - - expected_tags = { - "Environment": "Test", - "Purpose": "SimontemourTest", - "UpdatedAt": time.strftime("%Y-%m-%d") - } - - # Verify custom tags (ignoring ACK system tags) - for key, value in expected_tags.items(): - assert key in table_tags - assert table_tags[key] == value - - # Verify GSI - table_info = table.get(table_name) - assert "GlobalSecondaryIndexes" in table_info - assert len(table_info["GlobalSecondaryIndexes"]) == 1 - assert table_info["GlobalSecondaryIndexes"][0]["IndexName"] == "GSI1" - - # Verify replicas - replicas = table.get_replicas(table_name) - assert replicas is not None - assert len(replicas) == 2 - region_names = [r["RegionName"] for r in replicas] - assert REPLICA_REGION_1 in region_names - assert REPLICA_REGION_3 in region_names - assert REPLICA_REGION_2 not in region_names - - # Verify all replicas are active - for replica in replicas: - assert replica["ReplicaStatus"] == "ACTIVE" + # def test_simultaneous_updates(self, table_with_replicas): + # (ref, res) = table_with_replicas + + # table_name = res["spec"]["tableName"] + + # # Check DynamoDB Table exists + # assert self.table_exists(table_name) + + # # Wait for the initial replicas to be created and be active + # table.wait_until( + # table_name, + # table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), + # timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + # interval_seconds=30, + # ) + + # for region in 
[REPLICA_REGION_1, REPLICA_REGION_2]: + # table.wait_until( + # table_name, + # table.replica_status_matches(region, "ACTIVE"), + # timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + # interval_seconds=30, + # ) + + # # Get CR latest revision + # cr = k8s.wait_resource_consumed_by_controller(ref) + + # # Prepare simultaneous updates to multiple fields: + # # 1. Update tags + # # 2. Change replicas + # # 3. Add GSI + + # # Add attribute definitions needed for GSI + # cr["spec"]["attributeDefinitions"] = [ + # {"attributeName": "PK", "attributeType": "S"}, + # {"attributeName": "SK", "attributeType": "S"}, + # {"attributeName": "GSI1PK", "attributeType": "S"}, + # {"attributeName": "GSI1SK", "attributeType": "S"} + # ] + + # # Add a GSI + # cr["spec"]["globalSecondaryIndexes"] = [{ + # "indexName": "GSI1", + # "keySchema": [ + # {"attributeName": "GSI1PK", "keyType": "HASH"}, + # {"attributeName": "GSI1SK", "keyType": "RANGE"} + # ], + # "projection": { + # "projectionType": "ALL" + # } + # }] + + # # Update replicas - remove one and add a different one + # # Set directly rather than depending on current replicas + # cr["spec"]["tableReplicas"] = [ + # {"regionName": REPLICA_REGION_1}, # Keep the first defined region + # {"regionName": REPLICA_REGION_3} # Add a new region + # ] + + # # Update tags + # cr["spec"]["tags"] = [ + # {"key": "Environment", "value": "Test"}, + # {"key": "Purpose", "value": "SimontemourTest"}, + # {"key": "UpdatedAt", "value": time.strftime("%Y-%m-%d")} + # ] + + # # Patch k8s resource with all changes at once + # k8s.patch_custom_resource(ref, cr) + + # # Wait for GSI to be created (usually the slowest operation) + # table.wait_until( + # table_name, + # table.gsi_matches([{ + # "indexName": "GSI1", + # "keySchema": [ + # {"attributeName": "GSI1PK", "keyType": "HASH"}, + # {"attributeName": "GSI1SK", "keyType": "RANGE"} + # ], + # "projection": { + # "projectionType": "ALL" + # } + # }]), + # timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*3, + # 
interval_seconds=30, + # ) + + # # Wait for replicas to be updated + # expected_regions = [REPLICA_REGION_1, REPLICA_REGION_3] + # table.wait_until( + # table_name, + # table.replicas_match(expected_regions), + # timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*3, + # interval_seconds=30, + # ) + + # # Verify all changes were applied + + # # Check tags + # table_tags = get_resource_tags( + # cr["status"]["ackResourceMetadata"]["arn"]) + # tags.assert_ack_system_tags(tags=table_tags) + + # expected_tags = { + # "Environment": "Test", + # "Purpose": "SimontemourTest", + # "UpdatedAt": time.strftime("%Y-%m-%d") + # } + + # # Verify custom tags (ignoring ACK system tags) + # for key, value in expected_tags.items(): + # assert key in table_tags + # assert table_tags[key] == value + + # # Verify GSI + # table_info = table.get(table_name) + # assert "GlobalSecondaryIndexes" in table_info + # assert len(table_info["GlobalSecondaryIndexes"]) == 1 + # assert table_info["GlobalSecondaryIndexes"][0]["IndexName"] == "GSI1" + + # # Verify replicas + # replicas = table.get_replicas(table_name) + # assert replicas is not None + # assert len(replicas) == 2 + # region_names = [r["RegionName"] for r in replicas] + # assert REPLICA_REGION_1 in region_names + # assert REPLICA_REGION_3 in region_names + # assert REPLICA_REGION_2 not in region_names + + # # Verify all replicas are active + # for replica in replicas: + # assert replica["ReplicaStatus"] == "ACTIVE" From 08ebf414c6f05010af1f539af093cd7549da929e Mon Sep 17 00:00:00 2001 From: Arush Sharma Date: Mon, 24 Mar 2025 13:38:03 -0700 Subject: [PATCH 4/5] delete fixes --- apis/v1alpha1/ack-generate-metadata.yaml | 2 +- pkg/resource/table/hooks.go | 8 +- pkg/resource/table/hooks_replica_updates.go | 134 +++--- pkg/resource/table/sdk.go | 43 +- .../table/sdk_delete_pre_build_request.go.tpl | 3 - .../table/sdk_read_one_post_set_output.go.tpl | 40 +- .../table_with_gsi_and_replicas.yaml | 16 +- test/e2e/tests/test_table_replicas.py | 440 
++++++++---------- 8 files changed, 292 insertions(+), 394 deletions(-) diff --git a/apis/v1alpha1/ack-generate-metadata.yaml b/apis/v1alpha1/ack-generate-metadata.yaml index 28231d7..a456140 100755 --- a/apis/v1alpha1/ack-generate-metadata.yaml +++ b/apis/v1alpha1/ack-generate-metadata.yaml @@ -1,5 +1,5 @@ ack_generate_info: - build_date: "2025-03-23T21:30:39Z" + build_date: "2025-03-24T15:20:40Z" build_hash: 3722729cebe6d3c03c7e442655ef0846f91566a2 go_version: go1.24.0 version: v0.43.2-7-g3722729 diff --git a/pkg/resource/table/hooks.go b/pkg/resource/table/hooks.go index bb094c9..16d51eb 100644 --- a/pkg/resource/table/hooks.go +++ b/pkg/resource/table/hooks.go @@ -234,15 +234,13 @@ func (rm *resourceManager) customUpdateTable( return nil, err } case delta.DifferentAt("Spec.TableReplicas"): - if !hasStreamSpecificationWithNewAndOldImages(desired) { + // Enabling replicas required streams enabled and StreamViewType to be NEW_AND_OLD_IMAGES + // Version 2019.11.21 TableUpdate API requirement + if !hasStreamSpecificationWithNewAndOldImages(desired) { msg := "table must have DynamoDB Streams enabled with StreamViewType set to NEW_AND_OLD_IMAGES for replica updates" rlog.Debug(msg) return nil, ackerr.NewTerminalError(errors.New(msg)) } - - if !canUpdateTableReplicas(latest) { - return nil, requeueWaitReplicasActive - } if err := rm.syncReplicas(ctx, latest, desired); err != nil { return nil, err } diff --git a/pkg/resource/table/hooks_replica_updates.go b/pkg/resource/table/hooks_replica_updates.go index 18c616e..12bc5d9 100644 --- a/pkg/resource/table/hooks_replica_updates.go +++ b/pkg/resource/table/hooks_replica_updates.go @@ -15,17 +15,14 @@ package table import ( "context" - "strings" - "time" ackrtlog "github.com/aws-controllers-k8s/runtime/pkg/runtime/log" - ackerr "github.com/aws-controllers-k8s/runtime/pkg/errors" - ackrequeue "github.com/aws-controllers-k8s/runtime/pkg/requeue" "github.com/aws/aws-sdk-go-v2/aws" svcsdk 
"github.com/aws/aws-sdk-go-v2/service/dynamodb" svcsdktypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/aws-controllers-k8s/dynamodb-controller/apis/v1alpha1" + svcapitypes "github.com/aws-controllers-k8s/dynamodb-controller/apis/v1alpha1" ) // equalCreateReplicationGroupMemberActions compares two CreateReplicationGroupMemberAction objects @@ -178,7 +175,7 @@ func createReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) s for _, gsi := range replica.GlobalSecondaryIndexes { replicaGSI := svcsdktypes.ReplicaGlobalSecondaryIndex{} if gsi.IndexName != nil { - replicaGSI.IndexName = aws.String(*gsi.IndexName) + replicaGSI.IndexName = gsi.IndexName } if gsi.ProvisionedThroughputOverride != nil { replicaGSI.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{} @@ -250,7 +247,9 @@ func updateReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) s } // If no valid updates, return an empty ReplicationGroupUpdate - return svcsdktypes.ReplicationGroupUpdate{} + return svcsdktypes.ReplicationGroupUpdate{ + Update: nil, + } } // deleteReplicaUpdate creates a ReplicationGroupUpdate for deleting an existing replica @@ -262,25 +261,6 @@ func deleteReplicaUpdate(regionName string) svcsdktypes.ReplicationGroupUpdate { } } -// canUpdateTableReplicas returns true if it's possible to update table replicas. -// We can only modify replicas when they are in ACTIVE state. 
-func canUpdateTableReplicas(r *resource) bool { - // Check if Status or Replicas is nil - // needed when called by sdkdelete - if r == nil || r.ko == nil || r.ko.Status.Replicas == nil { - return true // If no replicas exist, we can proceed with updates - } - // Check if any replica is not in ACTIVE state - for _, replicaDesc := range r.ko.Status.Replicas { - if replicaDesc.RegionName != nil && replicaDesc.ReplicaStatus != nil { - if *replicaDesc.ReplicaStatus != string(svcsdktypes.ReplicaStatusActive) { - return false - } - } - } - return true -} - // hasStreamSpecificationWithNewAndOldImages checks if the table has DynamoDB Streams enabled // with the stream containing both the new and the old images of the item. func hasStreamSpecificationWithNewAndOldImages(r *resource) bool { @@ -314,33 +294,7 @@ func (rm *resourceManager) syncReplicas( _, err = rm.sdkapi.UpdateTable(ctx, input) rm.metrics.RecordAPICall("UPDATE", "UpdateTable", err) if err != nil { - // Handle specific errors - if awsErr, ok := ackerr.AWSError(err); ok { - // Handle ValidationException - when replicas are not part of the global table - if awsErr.ErrorCode() == "ValidationException" && - strings.Contains(awsErr.ErrorMessage(), "not part of the global table") { - // A replica was already deleted - rlog.Debug("replica already deleted from global table", - "table", *latest.ko.Spec.TableName, - "error", awsErr.ErrorMessage()) - return ackrequeue.NeededAfter( - ErrTableUpdating, - 30*time.Second, - ) - } - - // Handle ResourceInUseException - when the table is being updated - if awsErr.ErrorCode() == "ResourceInUseException" { - rlog.Debug("table is currently in use, will retry", - "table", *latest.ko.Spec.TableName, - "error", awsErr.ErrorMessage()) - return ackrequeue.NeededAfter( - ErrTableUpdating, - 30*time.Second, - ) - } - return err - } + return err } // If there are more replicas to process, requeue @@ -367,7 +321,7 @@ func (rm *resourceManager) 
newUpdateTableReplicaUpdatesOneAtATimePayload( exit(err) }() - createReplicas, updateReplicas, deleteRegions := calculateReplicaUpdates(latest, desired) + createReplicas, updateReplicas, deleteRegions := computeReplicaupdatesDelta(latest, desired) input = &svcsdk.UpdateTableInput{ TableName: aws.String(*desired.ko.Spec.TableName), @@ -382,6 +336,9 @@ func (rm *resourceManager) newUpdateTableReplicaUpdatesOneAtATimePayload( if len(createReplicas) > 0 { replica := *createReplicas[0] + if checkIfReplicasInProgress(latest.ko.Status.Replicas, *replica.RegionName) { + return nil, 0, requeueWaitReplicasActive + } rlog.Debug("creating replica in region", "table", *desired.ko.Spec.TableName, "region", *replica.RegionName) input.ReplicaUpdates = append(input.ReplicaUpdates, createReplicaUpdate(createReplicas[0])) return input, replicasInQueue, nil @@ -389,13 +346,23 @@ func (rm *resourceManager) newUpdateTableReplicaUpdatesOneAtATimePayload( if len(updateReplicas) > 0 { replica := *updateReplicas[0] + if checkIfReplicasInProgress(latest.ko.Status.Replicas, *replica.RegionName) { + return nil, 0, requeueWaitReplicasActive + } rlog.Debug("updating replica in region", "table", *desired.ko.Spec.TableName, "region", *replica.RegionName) - input.ReplicaUpdates = append(input.ReplicaUpdates, updateReplicaUpdate(updateReplicas[0])) + updateReplica := updateReplicaUpdate(updateReplicas[0]) + if updateReplica.Update == nil { + return nil, 0, requeueWaitReplicasActive + } + input.ReplicaUpdates = append(input.ReplicaUpdates, updateReplica) return input, replicasInQueue, nil } if len(deleteRegions) > 0 { replica := deleteRegions[0] + if checkIfReplicasInProgress(latest.ko.Status.Replicas, replica) { + return nil, 0, requeueWaitReplicasActive + } rlog.Debug("deleting replica in region", "table", *desired.ko.Spec.TableName, "region", replica) input.ReplicaUpdates = append(input.ReplicaUpdates, deleteReplicaUpdate(deleteRegions[0])) return input, replicasInQueue, nil @@ -404,9 +371,9 @@ 
func (rm *resourceManager) newUpdateTableReplicaUpdatesOneAtATimePayload( return input, replicasInQueue, nil } -// calculateReplicaUpdates calculates the replica updates needed to reconcile the latest state with the desired state +// computeReplicaupdatesDelta calculates the replica updates needed to reconcile the latest state with the desired state // Returns three slices: replicas to create, replicas to update, and region names to delete -func calculateReplicaUpdates( +func computeReplicaupdatesDelta( latest *resource, desired *resource, ) ( @@ -451,3 +418,58 @@ func calculateReplicaUpdates( return createReplicas, updateReplicas, deleteRegions } + +func setTableReplicas(ko *svcapitypes.Table, replicas []svcsdktypes.ReplicaDescription) { + if len(replicas) > 0 { + tableReplicas := []*v1alpha1.CreateReplicationGroupMemberAction{} + for _, replica := range replicas { + replicaElem := &v1alpha1.CreateReplicationGroupMemberAction{} + if replica.RegionName != nil { + replicaElem.RegionName = replica.RegionName + } + if replica.KMSMasterKeyId != nil { + replicaElem.KMSMasterKeyID = replica.KMSMasterKeyId + } + if replica.ProvisionedThroughputOverride != nil { + replicaElem.ProvisionedThroughputOverride = &v1alpha1.ProvisionedThroughputOverride{ + ReadCapacityUnits: replica.ProvisionedThroughputOverride.ReadCapacityUnits, + } + } + if replica.GlobalSecondaryIndexes != nil { + gsiList := []*v1alpha1.ReplicaGlobalSecondaryIndex{} + for _, gsi := range replica.GlobalSecondaryIndexes { + gsiElem := &v1alpha1.ReplicaGlobalSecondaryIndex{ + IndexName: gsi.IndexName, + } + if gsi.ProvisionedThroughputOverride != nil { + gsiElem.ProvisionedThroughputOverride = &v1alpha1.ProvisionedThroughputOverride{ + ReadCapacityUnits: gsi.ProvisionedThroughputOverride.ReadCapacityUnits, + } + } + gsiList = append(gsiList, gsiElem) + } + replicaElem.GlobalSecondaryIndexes = gsiList + } + if replica.ReplicaTableClassSummary != nil && replica.ReplicaTableClassSummary.TableClass != "" { + 
replicaElem.TableClassOverride = aws.String(string(replica.ReplicaTableClassSummary.TableClass))
+			}
+			tableReplicas = append(tableReplicas, replicaElem)
+		}
+		ko.Spec.TableReplicas = tableReplicas
+	} else {
+		ko.Spec.TableReplicas = nil
+	}
+}
+
+func checkIfReplicasInProgress(replicaDescriptions []*svcapitypes.ReplicaDescription, regionName string) bool {
+	for _, replica := range replicaDescriptions {
+		if replica.RegionName != nil && *replica.RegionName == regionName {
+			replicaStatus := replica.ReplicaStatus
+			if replicaStatus != nil && (*replicaStatus == string(svcsdktypes.ReplicaStatusCreating) || *replicaStatus == string(svcsdktypes.ReplicaStatusDeleting) || *replicaStatus == string(svcsdktypes.ReplicaStatusUpdating)) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/pkg/resource/table/sdk.go b/pkg/resource/table/sdk.go
index d93c081..659bfae 100644
--- a/pkg/resource/table/sdk.go
+++ b/pkg/resource/table/sdk.go
@@ -440,45 +440,7 @@ func (rm *resourceManager) sdkFind(
 	} else {
 		ko.Spec.BillingMode = aws.String("PROVISIONED")
 	}
-	if len(resp.Table.Replicas) > 0 {
-		tableReplicas := []*svcapitypes.CreateReplicationGroupMemberAction{}
-		for _, replica := range resp.Table.Replicas {
-			replicaElem := &svcapitypes.CreateReplicationGroupMemberAction{}
-			if replica.RegionName != nil {
-				replicaElem.RegionName = replica.RegionName
-			}
-			if replica.KMSMasterKeyId != nil {
-				replicaElem.KMSMasterKeyID = replica.KMSMasterKeyId
-			}
-			if replica.ProvisionedThroughputOverride != nil {
-				replicaElem.ProvisionedThroughputOverride = &svcapitypes.ProvisionedThroughputOverride{
-					ReadCapacityUnits: replica.ProvisionedThroughputOverride.ReadCapacityUnits,
-				}
-			}
-			if replica.GlobalSecondaryIndexes != nil {
-				gsiList := []*svcapitypes.ReplicaGlobalSecondaryIndex{}
-				for _, gsi := range replica.GlobalSecondaryIndexes {
-					gsiElem := &svcapitypes.ReplicaGlobalSecondaryIndex{
-						IndexName: gsi.IndexName,
-					}
-					if gsi.ProvisionedThroughputOverride != nil {
-						gsiElem.ProvisionedThroughputOverride = 
&svcapitypes.ProvisionedThroughputOverride{ - ReadCapacityUnits: gsi.ProvisionedThroughputOverride.ReadCapacityUnits, - } - } - gsiList = append(gsiList, gsiElem) - } - replicaElem.GlobalSecondaryIndexes = gsiList - } - if replica.ReplicaTableClassSummary != nil && replica.ReplicaTableClassSummary.TableClass != "" { - replicaElem.TableClassOverride = aws.String(string(replica.ReplicaTableClassSummary.TableClass)) - } - tableReplicas = append(tableReplicas, replicaElem) - } - ko.Spec.TableReplicas = tableReplicas - } else { - ko.Spec.TableReplicas = nil - } + setTableReplicas(ko, resp.Table.Replicas) if isTableCreating(&resource{ko}) { return &resource{ko}, requeueWaitWhileCreating } @@ -1057,9 +1019,6 @@ func (rm *resourceManager) sdkDelete( } // If there are replicas, we need to remove them before deleting the table - if !canUpdateTableReplicas(latest) { - return nil, requeueWaitReplicasActive - } if len(r.ko.Spec.TableReplicas) > 0 { desired := &resource{ ko: r.ko.DeepCopy(), diff --git a/templates/hooks/table/sdk_delete_pre_build_request.go.tpl b/templates/hooks/table/sdk_delete_pre_build_request.go.tpl index 6d67ae9..476452f 100644 --- a/templates/hooks/table/sdk_delete_pre_build_request.go.tpl +++ b/templates/hooks/table/sdk_delete_pre_build_request.go.tpl @@ -6,9 +6,6 @@ } // If there are replicas, we need to remove them before deleting the table - if !canUpdateTableReplicas(latest) { - return nil, requeueWaitReplicasActive - } if len(r.ko.Spec.TableReplicas) > 0 { desired := &resource{ ko: r.ko.DeepCopy(), diff --git a/templates/hooks/table/sdk_read_one_post_set_output.go.tpl b/templates/hooks/table/sdk_read_one_post_set_output.go.tpl index f983d5c..c8a174d 100644 --- a/templates/hooks/table/sdk_read_one_post_set_output.go.tpl +++ b/templates/hooks/table/sdk_read_one_post_set_output.go.tpl @@ -53,45 +53,7 @@ } else { ko.Spec.BillingMode = aws.String("PROVISIONED") } - if len(resp.Table.Replicas) > 0 { - tableReplicas := 
[]*svcapitypes.CreateReplicationGroupMemberAction{}
-		for _, replica := range resp.Table.Replicas {
-			replicaElem := &svcapitypes.CreateReplicationGroupMemberAction{}
-			if replica.RegionName != nil {
-				replicaElem.RegionName = replica.RegionName
-			}
-			if replica.KMSMasterKeyId != nil {
-				replicaElem.KMSMasterKeyID = replica.KMSMasterKeyId
-			}
-			if replica.ProvisionedThroughputOverride != nil {
-				replicaElem.ProvisionedThroughputOverride = &svcapitypes.ProvisionedThroughputOverride{
-					ReadCapacityUnits: replica.ProvisionedThroughputOverride.ReadCapacityUnits,
-				}
-			}
-			if replica.GlobalSecondaryIndexes != nil {
-				gsiList := []*svcapitypes.ReplicaGlobalSecondaryIndex{}
-				for _, gsi := range replica.GlobalSecondaryIndexes {
-					gsiElem := &svcapitypes.ReplicaGlobalSecondaryIndex{
-						IndexName: gsi.IndexName,
-					}
-					if gsi.ProvisionedThroughputOverride != nil {
-						gsiElem.ProvisionedThroughputOverride = &svcapitypes.ProvisionedThroughputOverride{
-							ReadCapacityUnits: gsi.ProvisionedThroughputOverride.ReadCapacityUnits,
-						}
-					}
-					gsiList = append(gsiList, gsiElem)
-				}
-				replicaElem.GlobalSecondaryIndexes = gsiList
-			}
-			if replica.ReplicaTableClassSummary != nil && replica.ReplicaTableClassSummary.TableClass != "" {
-				replicaElem.TableClassOverride = aws.String(string(replica.ReplicaTableClassSummary.TableClass))
-			}
-			tableReplicas = append(tableReplicas, replicaElem)
-		}
-		ko.Spec.TableReplicas = tableReplicas
-	} else {
-		ko.Spec.TableReplicas = nil
-	}
+	setTableReplicas(ko, resp.Table.Replicas)
 	if isTableCreating(&resource{ko}) {
 		return &resource{ko}, requeueWaitWhileCreating
 	}
diff --git a/test/e2e/resources/table_with_gsi_and_replicas.yaml b/test/e2e/resources/table_with_gsi_and_replicas.yaml
index 2ee6769..bb8298c 100644
--- a/test/e2e/resources/table_with_gsi_and_replicas.yaml
+++ b/test/e2e/resources/table_with_gsi_and_replicas.yaml
@@ -14,8 +14,6 @@ spec:
       attributeType: S
     - attributeName: GSI1SK
       attributeType: S
-    - attributeName: GSI2PK
-      attributeType: S
keySchema: - attributeName: PK keyType: HASH @@ -25,9 +23,13 @@ spec: streamSpecification: streamEnabled: true streamViewType: "NEW_AND_OLD_IMAGES" - replicationGroup: + tableReplicas: - regionName: $REPLICA_REGION_1 + globalSecondaryIndexes: + - indexName: GSI1 - regionName: $REPLICA_REGION_2 + globalSecondaryIndexes: + - indexName: GSI1 globalSecondaryIndexes: - indexName: GSI1 keySchema: @@ -36,10 +38,4 @@ spec: - attributeName: GSI1SK keyType: RANGE projection: - projectionType: ALL - - indexName: GSI2 - keySchema: - - attributeName: GSI2PK - keyType: HASH - projection: - projectionType: KEYS_ONLY \ No newline at end of file + projectionType: ALL \ No newline at end of file diff --git a/test/e2e/tests/test_table_replicas.py b/test/e2e/tests/test_table_replicas.py index 2995293..76e0743 100644 --- a/test/e2e/tests/test_table_replicas.py +++ b/test/e2e/tests/test_table_replicas.py @@ -23,15 +23,15 @@ from acktest import tags from acktest.k8s import resource as k8s from acktest.resources import random_suffix_name -from e2e import (CRD_GROUP, CRD_VERSION, condition, get_resource_tags, - load_dynamodb_resource, service_marker, table, - wait_for_cr_status) +from e2e import (CRD_GROUP, CRD_VERSION, condition, + load_dynamodb_resource, service_marker, table) from e2e.replacement_values import REPLACEMENT_VALUES +from acktest.k8s import condition RESOURCE_PLURAL = "tables" DELETE_WAIT_AFTER_SECONDS = 30 -MODIFY_WAIT_AFTER_SECONDS = 180 +MODIFY_WAIT_AFTER_SECONDS = 600 REPLICA_WAIT_AFTER_SECONDS = 600 REPLICA_REGION_1 = "us-east-1" @@ -70,7 +70,7 @@ def create_table_with_replicas(name: str, resource_template, regions=None): return (ref, cr) -@pytest.fixture(scope="module") +@pytest.fixture(scope="function") def table_with_replicas(): table_name = random_suffix_name("table-replicas", 32) @@ -80,13 +80,27 @@ def table_with_replicas(): [REPLICA_REGION_1, REPLICA_REGION_2] ) + # Wait for table to be ACTIVE before proceeding + table.wait_until( + table_name, + 
table.status_matches("ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + # Wait for initial replicas to be ACTIVE before yielding + table.wait_until( + table_name, + table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + yield (ref, res) - # Delete the k8s resource if it still exists - if k8s.get_resource_exists(ref): - k8s.delete_custom_resource(ref) - time.sleep(DELETE_WAIT_AFTER_SECONDS) + deleted = k8s.delete_custom_resource(ref) + assert deleted def create_table_with_invalid_replicas(name: str): replacements = REPLACEMENT_VALUES.copy() @@ -127,6 +141,39 @@ def table_with_invalid_replicas(): time.sleep(DELETE_WAIT_AFTER_SECONDS) +@pytest.fixture(scope="function") +def table_replicas_gsi(): + table_name = random_suffix_name("table-replicas-gsi", 32) + replacements = REPLACEMENT_VALUES.copy() + replacements["TABLE_NAME"] = table_name + replacements["REPLICA_REGION_1"] = REPLICA_REGION_1 + replacements["REPLICA_REGION_2"] = REPLICA_REGION_2 + + resource_data = load_dynamodb_resource( + "table_with_gsi_and_replicas", + additional_replacements=replacements, + ) + + ref = k8s.CustomResourceReference( + CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL, + table_name, namespace="default", + ) + k8s.create_custom_resource(ref, resource_data) + cr = k8s.wait_resource_consumed_by_controller(ref) + + table.wait_until( + table_name, + table.status_matches("ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + yield (ref, cr) + + deleted = k8s.delete_custom_resource(ref) + time.sleep(DELETE_WAIT_AFTER_SECONDS) + assert deleted + @service_marker @pytest.mark.canary class TestTableReplicas: @@ -134,30 +181,13 @@ def table_exists(self, table_name: str) -> bool: return table.get(table_name) is not None def test_create_table_with_replicas(self, table_with_replicas): - (ref, res) = table_with_replicas - + (_, res) = table_with_replicas 
table_name = res["spec"]["tableName"] - # Check DynamoDB Table exists - assert self.table_exists(table_name) + # Table should already be ACTIVE from fixture + assert table.get(table_name) is not None - # Wait for the table to be active - table.wait_until( - table_name, - table.status_matches("ACTIVE"), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - interval_seconds=30, - ) - - # Wait for both initial replicas to be created - table.wait_until( - table_name, - table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), - timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - interval_seconds=30, - ) - - # Wait for both replicas to be active + # Verify replicas exist and are ACTIVE for region in [REPLICA_REGION_1, REPLICA_REGION_2]: table.wait_until( table_name, @@ -166,38 +196,26 @@ def test_create_table_with_replicas(self, table_with_replicas): interval_seconds=30, ) - # Verify the replica exists - replicas = table.get_replicas(table_name) - assert replicas is not None - assert len(replicas) == 2 - region_names = [r["RegionName"] for r in replicas] - assert REPLICA_REGION_1 in region_names - assert REPLICA_REGION_2 in region_names - for replica in replicas: - assert replica["ReplicaStatus"] == "ACTIVE" - def test_add_replica(self, table_with_replicas): (ref, res) = table_with_replicas - table_name = res["spec"]["tableName"] - # Check DynamoDB Table exists - assert self.table_exists(table_name) - - # Get CR latest revision - cr = k8s.wait_resource_consumed_by_controller(ref) + assert table.get(table_name) is not None + table.wait_until( + table_name, + table.status_matches("ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) - # Remove both initial replicas and add three new ones + # Update replicas + cr = k8s.get_resource(ref) cr["spec"]["tableReplicas"] = [ {"regionName": REPLICA_REGION_3}, {"regionName": REPLICA_REGION_4}, {"regionName": REPLICA_REGION_5} ] - - # Patch k8s resource k8s.patch_custom_resource(ref, cr) - - # Wait for the 
replicas to be updated table.wait_until( table_name, table.replicas_match( @@ -206,7 +224,7 @@ def test_add_replica(self, table_with_replicas): interval_seconds=30, ) - # Wait for all new replicas to be active + # Verify all replicas are ACTIVE for region in [REPLICA_REGION_3, REPLICA_REGION_4, REPLICA_REGION_5]: table.wait_until( table_name, @@ -215,27 +233,12 @@ def test_add_replica(self, table_with_replicas): interval_seconds=30, ) - # Verify replicas - replicas = table.get_replicas(table_name) - assert replicas is not None - assert len(replicas) == 3 - - region_names = [r["RegionName"] for r in replicas] - for region in [REPLICA_REGION_3, REPLICA_REGION_4, REPLICA_REGION_5]: - assert region in region_names - - for replica in replicas: - assert replica["ReplicaStatus"] == "ACTIVE" - def test_remove_replica(self, table_with_replicas): (ref, res) = table_with_replicas - table_name = res["spec"]["tableName"] - # Check DynamoDB Table exists assert self.table_exists(table_name) - # Wait for the initial replicas to be created and be active table.wait_until( table_name, table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), @@ -251,26 +254,21 @@ def test_remove_replica(self, table_with_replicas): interval_seconds=30, ) - # Get CR latest revision cr = k8s.wait_resource_consumed_by_controller(ref) current_replicas = table.get_replicas(table_name) assert current_replicas is not None assert len(current_replicas) >= 1 - # Get the region names of the current replicas current_regions = [r["RegionName"] for r in current_replicas] logging.info(f"Current replicas: {current_regions}") - # Keep all but the last replica regions_to_keep = current_regions[:-1] regions_to_remove = [current_regions[-1]] - # Remove the last replica cr["spec"]["tableReplicas"] = [ {"regionName": region} for region in regions_to_keep ] - # Patch k8s resource k8s.patch_custom_resource(ref, cr) # Wait for the replica to be removed @@ -286,7 +284,6 @@ def test_remove_replica(self, table_with_replicas): 
assert replicas is not None assert len(replicas) == len(regions_to_keep) - # Check that removed region is gone and kept regions are present region_names = [r["RegionName"] for r in replicas] for region in regions_to_keep: assert region in region_names @@ -297,187 +294,154 @@ def test_delete_table_with_replicas(self, table_with_replicas): (ref, res) = table_with_replicas table_name = res["spec"]["tableName"] + assert self.table_exists(table_name) - # Check DynamoDB Table exists + def test_terminal_condition_for_invalid_stream_specification(self, table_with_invalid_replicas): + (ref, res) = table_with_invalid_replicas + + table_name = res["spec"]["tableName"] assert self.table_exists(table_name) - # Delete the k8s resource - k8s.delete_custom_resource(ref) + max_wait_seconds = 120 + interval_seconds = 10 + start_time = time.time() + terminal_condition_found = False + while time.time() - start_time < max_wait_seconds: + try: + condition.assert_type_status( + ref, + condition.CONDITION_TYPE_TERMINAL, + True) + + terminal_condition_found = True + cond = k8s.get_resource_condition( + ref, condition.CONDITION_TYPE_TERMINAL) + assert "table must have DynamoDB Streams enabled with StreamViewType set to NEW_AND_OLD_IMAGES" in cond[ + "message"] + break + except: + time.sleep(interval_seconds) + + assert terminal_condition_found, "Terminal condition was not set for invalid StreamSpecification" + + def test_staged_replicas_and_gsi_updates(self, table_replicas_gsi): + (ref, cr) = table_replicas_gsi + table_name = cr["spec"]["tableName"] max_wait_seconds = REPLICA_WAIT_AFTER_SECONDS interval_seconds = 30 start_time = time.time() while time.time() - start_time < max_wait_seconds: - if not self.table_exists(table_name): + if self.table_exists(table_name): break time.sleep(interval_seconds) + assert self.table_exists(table_name) - # Verify the table was deleted - assert not self.table_exists(table_name) - replicas = table.get_replicas(table_name) - assert replicas is None + 
table.wait_until( + table_name, + table.gsi_matches([{ + "indexName": "GSI1", + "keySchema": [ + {"attributeName": "GSI1PK", "keyType": "HASH"}, + {"attributeName": "GSI1SK", "keyType": "RANGE"} + ], + "projection": { + "projectionType": "ALL" + } + }]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) - def test_terminal_condition_for_invalid_stream_specification(self, table_with_invalid_replicas): - (ref, res) = table_with_invalid_replicas + # Step 2: Update - add second GSI and two more replicas + cr = k8s.wait_resource_consumed_by_controller(ref) - table_name = res["spec"]["tableName"] + # Add attribute definition needed for GSI2 + cr["spec"]["attributeDefinitions"].append( + {"attributeName": "GSI2PK", "attributeType": "S"} + ) - # Check DynamoDB Table exists - assert self.table_exists(table_name) + # Add GSI2 + cr["spec"]["globalSecondaryIndexes"].append({ + "indexName": "GSI2", + "keySchema": [ + {"attributeName": "GSI2PK", "keyType": "HASH"} + ], + "projection": { + "projectionType": "KEYS_ONLY" + } + }) + + # Add two more replicas + cr["spec"]["tableReplicas"] = [ + {"regionName": REPLICA_REGION_1, + "globalSecondaryIndexes": [{"indexName": "GSI1"}, {"indexName": "GSI2"}]}, + {"regionName": REPLICA_REGION_2, + "globalSecondaryIndexes": [{"indexName": "GSI1"}, {"indexName": "GSI2"}]}, + {"regionName": REPLICA_REGION_3, + "globalSecondaryIndexes": [{"indexName": "GSI1"}, {"indexName": "GSI2"}]} + ] - # Wait for the terminal condition to be set - max_wait_seconds = 120 - interval_seconds = 10 - start_time = time.time() - terminal_condition_set = False + # Update the resource + k8s.patch_custom_resource(ref, cr) - while time.time() - start_time < max_wait_seconds: - cr = k8s.get_resource(ref) - if cr and "status" in cr and "conditions" in cr["status"]: - for condition_obj in cr["status"]["conditions"]: - if condition_obj["type"] == "ACK.Terminal" and condition_obj["status"] == "True": - terminal_condition_set = True - # Verify the error 
message - assert "table must have DynamoDB Streams enabled with StreamViewType set to NEW_AND_OLD_IMAGES" in condition_obj[ - "message"] - break - - if terminal_condition_set: - break + # Wait for the new GSI to be created + table.wait_until( + table_name, + table.gsi_matches([ + { + "indexName": "GSI1", + "keySchema": [ + {"attributeName": "GSI1PK", "keyType": "HASH"}, + {"attributeName": "GSI1SK", "keyType": "RANGE"} + ], + "projection": { + "projectionType": "ALL" + } + }, + { + "indexName": "GSI2", + "keySchema": [ + {"attributeName": "GSI2PK", "keyType": "HASH"} + ], + "projection": { + "projectionType": "KEYS_ONLY" + } + } + ]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*2, + interval_seconds=30, + ) - time.sleep(interval_seconds) + table.wait_until( + table_name, + table.replicas_match( + [REPLICA_REGION_1, REPLICA_REGION_2, REPLICA_REGION_3]), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*2, + interval_seconds=30, + ) + + for region in [REPLICA_REGION_1, REPLICA_REGION_2, REPLICA_REGION_3]: + table.wait_until( + table_name, + table.replica_status_matches(region, "ACTIVE"), + timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, + interval_seconds=30, + ) + + table_info = table.get(table_name) + assert "GlobalSecondaryIndexes" in table_info + assert len(table_info["GlobalSecondaryIndexes"]) == 2 + gsi_names = [gsi["IndexName"] + for gsi in table_info["GlobalSecondaryIndexes"]] + assert "GSI1" in gsi_names + assert "GSI2" in gsi_names - assert terminal_condition_set, "Terminal condition was not set for invalid StreamSpecification" - - # def test_simultaneous_updates(self, table_with_replicas): - # (ref, res) = table_with_replicas - - # table_name = res["spec"]["tableName"] - - # # Check DynamoDB Table exists - # assert self.table_exists(table_name) - - # # Wait for the initial replicas to be created and be active - # table.wait_until( - # table_name, - # table.replicas_match([REPLICA_REGION_1, REPLICA_REGION_2]), - # timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - # 
interval_seconds=30, - # ) - - # for region in [REPLICA_REGION_1, REPLICA_REGION_2]: - # table.wait_until( - # table_name, - # table.replica_status_matches(region, "ACTIVE"), - # timeout_seconds=REPLICA_WAIT_AFTER_SECONDS, - # interval_seconds=30, - # ) - - # # Get CR latest revision - # cr = k8s.wait_resource_consumed_by_controller(ref) - - # # Prepare simultaneous updates to multiple fields: - # # 1. Update tags - # # 2. Change replicas - # # 3. Add GSI - - # # Add attribute definitions needed for GSI - # cr["spec"]["attributeDefinitions"] = [ - # {"attributeName": "PK", "attributeType": "S"}, - # {"attributeName": "SK", "attributeType": "S"}, - # {"attributeName": "GSI1PK", "attributeType": "S"}, - # {"attributeName": "GSI1SK", "attributeType": "S"} - # ] - - # # Add a GSI - # cr["spec"]["globalSecondaryIndexes"] = [{ - # "indexName": "GSI1", - # "keySchema": [ - # {"attributeName": "GSI1PK", "keyType": "HASH"}, - # {"attributeName": "GSI1SK", "keyType": "RANGE"} - # ], - # "projection": { - # "projectionType": "ALL" - # } - # }] - - # # Update replicas - remove one and add a different one - # # Set directly rather than depending on current replicas - # cr["spec"]["tableReplicas"] = [ - # {"regionName": REPLICA_REGION_1}, # Keep the first defined region - # {"regionName": REPLICA_REGION_3} # Add a new region - # ] - - # # Update tags - # cr["spec"]["tags"] = [ - # {"key": "Environment", "value": "Test"}, - # {"key": "Purpose", "value": "SimontemourTest"}, - # {"key": "UpdatedAt", "value": time.strftime("%Y-%m-%d")} - # ] - - # # Patch k8s resource with all changes at once - # k8s.patch_custom_resource(ref, cr) - - # # Wait for GSI to be created (usually the slowest operation) - # table.wait_until( - # table_name, - # table.gsi_matches([{ - # "indexName": "GSI1", - # "keySchema": [ - # {"attributeName": "GSI1PK", "keyType": "HASH"}, - # {"attributeName": "GSI1SK", "keyType": "RANGE"} - # ], - # "projection": { - # "projectionType": "ALL" - # } - # }]), - # 
timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*3, - # interval_seconds=30, - # ) - - # # Wait for replicas to be updated - # expected_regions = [REPLICA_REGION_1, REPLICA_REGION_3] - # table.wait_until( - # table_name, - # table.replicas_match(expected_regions), - # timeout_seconds=REPLICA_WAIT_AFTER_SECONDS*3, - # interval_seconds=30, - # ) - - # # Verify all changes were applied - - # # Check tags - # table_tags = get_resource_tags( - # cr["status"]["ackResourceMetadata"]["arn"]) - # tags.assert_ack_system_tags(tags=table_tags) - - # expected_tags = { - # "Environment": "Test", - # "Purpose": "SimontemourTest", - # "UpdatedAt": time.strftime("%Y-%m-%d") - # } - - # # Verify custom tags (ignoring ACK system tags) - # for key, value in expected_tags.items(): - # assert key in table_tags - # assert table_tags[key] == value - - # # Verify GSI - # table_info = table.get(table_name) - # assert "GlobalSecondaryIndexes" in table_info - # assert len(table_info["GlobalSecondaryIndexes"]) == 1 - # assert table_info["GlobalSecondaryIndexes"][0]["IndexName"] == "GSI1" - - # # Verify replicas - # replicas = table.get_replicas(table_name) - # assert replicas is not None - # assert len(replicas) == 2 - # region_names = [r["RegionName"] for r in replicas] - # assert REPLICA_REGION_1 in region_names - # assert REPLICA_REGION_3 in region_names - # assert REPLICA_REGION_2 not in region_names - - # # Verify all replicas are active - # for replica in replicas: - # assert replica["ReplicaStatus"] == "ACTIVE" + replicas = table.get_replicas(table_name) + assert replicas is not None + assert len(replicas) == 3 + region_names = [r["RegionName"] for r in replicas] + assert REPLICA_REGION_1 in region_names + assert REPLICA_REGION_2 in region_names + assert REPLICA_REGION_3 in region_names \ No newline at end of file From 652f3f4407d346ac984819460daddcba275d75ad Mon Sep 17 00:00:00 2001 From: arush sharma Date: Mon, 24 Mar 2025 19:04:54 -0700 Subject: [PATCH 5/5] change update-replica style --- 
pkg/resource/table/hooks_replica_updates.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/pkg/resource/table/hooks_replica_updates.go b/pkg/resource/table/hooks_replica_updates.go index 12bc5d9..f6cc180 100644 --- a/pkg/resource/table/hooks_replica_updates.go +++ b/pkg/resource/table/hooks_replica_updates.go @@ -196,7 +196,6 @@ func createReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) s func updateReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) svcsdktypes.ReplicationGroupUpdate { replicaUpdate := svcsdktypes.ReplicationGroupUpdate{} updateAction := &svcsdktypes.UpdateReplicationGroupMemberAction{} - isValidUpdate := false // updates to gsi without ProvisionedThroughputOverride are invalid if replica.RegionName != nil { updateAction.RegionName = aws.String(*replica.RegionName) @@ -205,12 +204,10 @@ func updateReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) s if replica.KMSMasterKeyID != nil { updateAction.KMSMasterKeyId = aws.String(*replica.KMSMasterKeyID) - isValidUpdate = true } if replica.TableClassOverride != nil { updateAction.TableClassOverride = svcsdktypes.TableClass(*replica.TableClassOverride) - isValidUpdate = true } if replica.ProvisionedThroughputOverride != nil && @@ -218,7 +215,6 @@ func updateReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) s updateAction.ProvisionedThroughputOverride = &svcsdktypes.ProvisionedThroughputOverride{ ReadCapacityUnits: replica.ProvisionedThroughputOverride.ReadCapacityUnits, } - isValidUpdate = true } // Only include GSIs that have provisioned throughput overrides @@ -232,16 +228,21 @@ func updateReplicaUpdate(replica *v1alpha1.CreateReplicationGroupMemberAction) s ReadCapacityUnits: gsi.ProvisionedThroughputOverride.ReadCapacityUnits, }, }) - isValidUpdate = true } } - // Only set GlobalSecondaryIndexes if we have GSIs with throughput overrides if len(gsisWithOverrides) > 0 { 
updateAction.GlobalSecondaryIndexes = gsisWithOverrides } - if isValidUpdate { + // Check if there are any actual updates to perform + // replica GSI updates are invalid updates since the GSI already exists on the source table + hasUpdates := updateAction.KMSMasterKeyId != nil || + updateAction.TableClassOverride != "" || + updateAction.ProvisionedThroughputOverride != nil || + len(updateAction.GlobalSecondaryIndexes) > 0 + + if hasUpdates { replicaUpdate.Update = updateAction return replicaUpdate }