From 428dd94f507b0833681325fb69606d15247e6c03 Mon Sep 17 00:00:00 2001
From: Radek Simko
Date: Tue, 23 Jan 2018 08:53:26 +0000
Subject: [PATCH] resource/aws_dynamodb_table: Refactoring

---
 aws/data_source_aws_dynamodb_table.go   |   40 +-
 aws/resource_aws_dynamodb_table.go      | 1058 ++++++-----------------
 aws/resource_aws_dynamodb_table_test.go |  455 +++++++++-
 aws/structure.go                        |  301 +++++++
 aws/tags.go                             |   76 +-
 5 files changed, 1100 insertions(+), 830 deletions(-)

diff --git a/aws/data_source_aws_dynamodb_table.go b/aws/data_source_aws_dynamodb_table.go
index 844bddfb982..1845b7c361f 100644
--- a/aws/data_source_aws_dynamodb_table.go
+++ b/aws/data_source_aws_dynamodb_table.go
@@ -3,12 +3,10 @@ package aws
 import (
 	"bytes"
 	"fmt"
-	"log"
 	"strings"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/dynamodb"
-	"github.com/hashicorp/errwrap"
 	"github.com/hashicorp/terraform/helper/hashcode"
 	"github.com/hashicorp/terraform/helper/schema"
 )
@@ -174,21 +172,41 @@ func dataSourceAwsDynamoDbTable() *schema.Resource {
 }
 
 func dataSourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error {
-	dynamodbconn := meta.(*AWSClient).dynamodbconn
+	conn := meta.(*AWSClient).dynamodbconn
 
-	name := d.Get("name").(string)
-	req := &dynamodb.DescribeTableInput{
-		TableName: aws.String(name),
+	result, err := conn.DescribeTable(&dynamodb.DescribeTableInput{
+		TableName: aws.String(d.Get("name").(string)),
+	})
+
+	if err != nil {
+		return fmt.Errorf("Error retrieving DynamoDB table: %s", err)
 	}
 
-	log.Printf("[DEBUG] Reading DynamoDB Table: %s", req)
-	result, err := dynamodbconn.DescribeTable(req)
+	d.SetId(*result.Table.TableName)
 
+	err = flattenAwsDynamoDbTableResource(d, result.Table)
 	if err != nil {
-		return errwrap.Wrapf("Error retrieving DynamoDB table: {{err}}", err)
+		return err
 	}
 
-	d.SetId(*result.Table.TableName)
+	ttlOut, err := conn.DescribeTimeToLive(&dynamodb.DescribeTimeToLiveInput{
+		TableName: aws.String(d.Id()),
+	})
+	if err != nil {
+		return err
+	}
+	if ttlOut.TimeToLiveDescription != nil {
+		err := d.Set("ttl", flattenDynamoDbTtl(ttlOut.TimeToLiveDescription))
+		if err != nil {
+			return err
+		}
+	}
+
+	tags, err := readDynamoDbTableTags(d.Get("arn").(string), conn)
+	if err != nil {
+		return err
+	}
+	d.Set("tags", tags)
 
-	return flattenAwsDynamoDbTableResource(d, meta, result.Table)
+	return nil
 }
diff --git a/aws/resource_aws_dynamodb_table.go b/aws/resource_aws_dynamodb_table.go
index d9b072f88c9..c4b67d649f6 100644
--- a/aws/resource_aws_dynamodb_table.go
+++ b/aws/resource_aws_dynamodb_table.go
@@ -7,28 +7,13 @@ import (
 	"strings"
 	"time"
 
-	"github.com/hashicorp/errwrap"
-	"github.com/hashicorp/terraform/helper/resource"
-	"github.com/hashicorp/terraform/helper/schema"
-
 	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/service/dynamodb"
 	"github.com/hashicorp/terraform/helper/hashcode"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/helper/schema"
 )
 
-// Number of times to retry if a throttling-related exception occurs
-const DYNAMODB_MAX_THROTTLE_RETRIES = 5
-
-// How long to sleep when a throttle-event happens
-const DYNAMODB_THROTTLE_SLEEP = 5 * time.Second
-
-// How long to sleep if a limit-exceeded event happens
-const DYNAMODB_LIMIT_EXCEEDED_SLEEP = 10 * time.Second
-
-// A number of these are marked as computed because if you don't
-// provide a value, DynamoDB will provide you with defaults (which are the
-// default values specified below)
 func
resourceAwsDynamoDbTable() *schema.Resource { return &schema.Resource{ Create: resourceAwsDynamoDbTableCreate, @@ -212,498 +197,198 @@ func resourceAwsDynamoDbTable() *schema.Resource { } func resourceAwsDynamoDbTableCreate(d *schema.ResourceData, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - - name := d.Get("name").(string) + conn := meta.(*AWSClient).dynamodbconn - log.Printf("[DEBUG] DynamoDB table create: %s", name) - - throughput := &dynamodb.ProvisionedThroughput{ - ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))), - WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))), + keySchemaMap := map[string]interface{}{ + "hash_key": d.Get("hash_key").(string), } - - hash_key_name := d.Get("hash_key").(string) - keyschema := []*dynamodb.KeySchemaElement{ - { - AttributeName: aws.String(hash_key_name), - KeyType: aws.String("HASH"), - }, + if v, ok := d.GetOk("range_key"); ok { + keySchemaMap["range_key"] = v.(string) } - if range_key, ok := d.GetOk("range_key"); ok { - range_schema_element := &dynamodb.KeySchemaElement{ - AttributeName: aws.String(range_key.(string)), - KeyType: aws.String("RANGE"), - } - keyschema = append(keyschema, range_schema_element) - } + log.Printf("[DEBUG] Creating DynamoDB table with key schema: %#v", keySchemaMap) req := &dynamodb.CreateTableInput{ - TableName: aws.String(name), - ProvisionedThroughput: throughput, - KeySchema: keyschema, + TableName: aws.String(d.Get("name").(string)), + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))), + WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))), + }, + KeySchema: expandDynamoDbKeySchema(keySchemaMap), } - if attributedata, ok := d.GetOk("attribute"); ok { - attributes := []*dynamodb.AttributeDefinition{} - attributeSet := attributedata.(*schema.Set) - for _, attribute := range attributeSet.List() { - attr := attribute.(map[string]interface{}) - attributes = append(attributes, &dynamodb.AttributeDefinition{ - AttributeName: aws.String(attr["name"].(string)), - AttributeType: aws.String(attr["type"].(string)), - }) - } - - req.AttributeDefinitions = attributes + if v, ok := d.GetOk("attribute"); ok { + aSet := v.(*schema.Set) + req.AttributeDefinitions = expandDynamoDbAttributes(aSet.List()) } - if lsidata, ok := d.GetOk("local_secondary_index"); ok { - log.Printf("[DEBUG] Adding LSI data to the table") - - lsiSet := lsidata.(*schema.Set) - localSecondaryIndexes := []*dynamodb.LocalSecondaryIndex{} - for _, lsiObject := range lsiSet.List() { - lsi := lsiObject.(map[string]interface{}) - - projection := &dynamodb.Projection{ - ProjectionType: aws.String(lsi["projection_type"].(string)), - } - - if lsi["projection_type"] == "INCLUDE" { - non_key_attributes := []*string{} - for _, attr := range lsi["non_key_attributes"].([]interface{}) { - non_key_attributes = append(non_key_attributes, aws.String(attr.(string))) - } - projection.NonKeyAttributes = non_key_attributes - } - - localSecondaryIndexes = append(localSecondaryIndexes, &dynamodb.LocalSecondaryIndex{ - IndexName: aws.String(lsi["name"].(string)), - KeySchema: []*dynamodb.KeySchemaElement{ - { - AttributeName: aws.String(hash_key_name), - KeyType: aws.String("HASH"), - }, - { - AttributeName: aws.String(lsi["range_key"].(string)), - KeyType: aws.String("RANGE"), - }, - }, - Projection: projection, - }) - } - - req.LocalSecondaryIndexes = localSecondaryIndexes - - log.Printf("[DEBUG] Added %d LSI definitions", 
len(localSecondaryIndexes)) + if v, ok := d.GetOk("local_secondary_index"); ok { + lsiSet := v.(*schema.Set) + req.LocalSecondaryIndexes = expandDynamoDbLocalSecondaryIndexes(lsiSet.List(), keySchemaMap) } - if gsidata, ok := d.GetOk("global_secondary_index"); ok { + if v, ok := d.GetOk("global_secondary_index"); ok { globalSecondaryIndexes := []*dynamodb.GlobalSecondaryIndex{} - - gsiSet := gsidata.(*schema.Set) + gsiSet := v.(*schema.Set) for _, gsiObject := range gsiSet.List() { gsi := gsiObject.(map[string]interface{}) - gsiObject := createGSIFromData(&gsi) - globalSecondaryIndexes = append(globalSecondaryIndexes, &gsiObject) + gsiObject := expandDynamoDbGlobalSecondaryIndex(gsi) + globalSecondaryIndexes = append(globalSecondaryIndexes, gsiObject) } req.GlobalSecondaryIndexes = globalSecondaryIndexes } - if _, ok := d.GetOk("stream_enabled"); ok { - + if v, ok := d.GetOk("stream_enabled"); ok { req.StreamSpecification = &dynamodb.StreamSpecification{ - StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)), + StreamEnabled: aws.Bool(v.(bool)), StreamViewType: aws.String(d.Get("stream_view_type").(string)), } - - log.Printf("[DEBUG] Adding StreamSpecifications to the table") } - _, timeToLiveOk := d.GetOk("ttl") - _, tagsOk := d.GetOk("tags") - - attemptCount := 1 - for attemptCount <= DYNAMODB_MAX_THROTTLE_RETRIES { - output, err := dynamodbconn.CreateTable(req) + var output *dynamodb.CreateTableOutput + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var err error + output, err = conn.CreateTable(req) if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - switch code := awsErr.Code(); code { - case "ThrottlingException": - log.Printf("[DEBUG] Attempt %d/%d: Sleeping for a bit to throttle back create request", attemptCount, DYNAMODB_MAX_THROTTLE_RETRIES) - time.Sleep(DYNAMODB_THROTTLE_SLEEP) - attemptCount += 1 - case "LimitExceededException": - // If we're at resource capacity, error out without retry. e.g. 
- // Subscriber limit exceeded: There is a limit of 256 tables per subscriber - // Do not error out on this similar throttling message: - // Subscriber limit exceeded: Only 10 tables can be created, updated, or deleted simultaneously - if strings.Contains(awsErr.Message(), "Subscriber limit exceeded:") && !strings.Contains(awsErr.Message(), "can be created, updated, or deleted simultaneously") { - return fmt.Errorf("AWS Error creating DynamoDB table: %s", err) - } - log.Printf("[DEBUG] Limit on concurrent table creations hit, sleeping for a bit") - time.Sleep(DYNAMODB_LIMIT_EXCEEDED_SLEEP) - attemptCount += 1 - default: - // Some other non-retryable exception occurred - return fmt.Errorf("AWS Error creating DynamoDB table: %s", err) - } - } else { - // Non-AWS exception occurred, give up - return fmt.Errorf("Error creating DynamoDB table: %s", err) + if isAWSErr(err, "ThrottlingException", "") { + return resource.RetryableError(err) } - } else { - // No error, set ID and return - d.SetId(*output.TableDescription.TableName) - tableArn := *output.TableDescription.TableArn - if err := d.Set("arn", tableArn); err != nil { - return err + if isAWSErr(err, dynamodb.ErrCodeLimitExceededException, "can be created, updated, or deleted simultaneously") { + return resource.RetryableError(err) } - - // Wait, till table is active before imitating any TimeToLive changes - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - log.Printf("[DEBUG] Error waiting for table to be active: %s", err) - return err + if isAWSErr(err, dynamodb.ErrCodeLimitExceededException, "indexed tables that can be created simultaneously") { + return resource.RetryableError(err) } - log.Printf("[DEBUG] Setting DynamoDB TimeToLive on arn: %s", tableArn) - if timeToLiveOk { - if err := updateTimeToLive(d, meta); err != nil { - log.Printf("[DEBUG] Error updating table TimeToLive: %s", err) - return err - } - } + return resource.NonRetryableError(err) + } + return nil + }) + if err != nil { + return err + } - if tagsOk { - log.Printf("[DEBUG] Setting DynamoDB Tags on arn: %s", tableArn) - if err := createTableTags(d, meta); err != nil { - return err - } - } + d.SetId(*output.TableDescription.TableName) + d.Set("arn", output.TableDescription.TableArn) - return resourceAwsDynamoDbTableRead(d, meta) - } + if err := waitForDynamoDbTableToBeActive(d.Id(), 10*time.Minute, conn); err != nil { + return err } - // Too many throttling events occurred, give up - return fmt.Errorf("Unable to create DynamoDB table '%s' after %d attempts", name, attemptCount) + return resourceAwsDynamoDbTableUpdate(d, meta) } func resourceAwsDynamoDbTableUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).dynamodbconn - log.Printf("[DEBUG] Updating DynamoDB table %s", d.Id()) - dynamodbconn := meta.(*AWSClient).dynamodbconn - - // Ensure table is active before trying to update - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - - if d.HasChange("read_capacity") || d.HasChange("write_capacity") { - req := &dynamodb.UpdateTableInput{ + // Cannot create or delete index while updating table IOPS + // so we update IOPS separately + if (d.HasChange("read_capacity") || d.HasChange("write_capacity")) && !d.IsNewResource() { + _, err := conn.UpdateTable(&dynamodb.UpdateTableInput{ TableName: aws.String(d.Id()), - } - - throughput := &dynamodb.ProvisionedThroughput{ - ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))), - 
WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))), - } - req.ProvisionedThroughput = throughput - - _, err := dynamodbconn.UpdateTable(req) - + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(int64(d.Get("read_capacity").(int))), + WriteCapacityUnits: aws.Int64(int64(d.Get("write_capacity").(int))), + }, + }) if err != nil { return err } - - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) + if err := waitForDynamoDbTableToBeActive(d.Id(), d.Timeout(schema.TimeoutUpdate), conn); err != nil { + return fmt.Errorf("Error waiting for Dynamo DB Table update: %s", err) } } - if d.HasChange("stream_enabled") || d.HasChange("stream_view_type") { - req := &dynamodb.UpdateTableInput{ + if (d.HasChange("stream_enabled") || d.HasChange("stream_view_type")) && !d.IsNewResource() { + input := &dynamodb.UpdateTableInput{ TableName: aws.String(d.Id()), + StreamSpecification: &dynamodb.StreamSpecification{ + StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)), + StreamViewType: aws.String(d.Get("stream_view_type").(string)), + }, } - - req.StreamSpecification = &dynamodb.StreamSpecification{ - StreamEnabled: aws.Bool(d.Get("stream_enabled").(bool)), - StreamViewType: aws.String(d.Get("stream_view_type").(string)), - } - - _, err := dynamodbconn.UpdateTable(req) - + _, err := conn.UpdateTable(input) if err != nil { return err } - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) + if err := waitForDynamoDbTableToBeActive(d.Id(), d.Timeout(schema.TimeoutUpdate), conn); err != nil { + return fmt.Errorf("Error waiting for Dynamo DB Table update: %s", err) } } - if d.HasChange("global_secondary_index") { - log.Printf("[DEBUG] Changed GSI data") - req := &dynamodb.UpdateTableInput{ - TableName: aws.String(d.Id()), + if d.HasChange("global_secondary_index") && !d.IsNewResource() { + var attributes []*dynamodb.AttributeDefinition + if v, ok := d.GetOk("attribute"); ok { + attributes = expandDynamoDbAttributes(v.(*schema.Set).List()) } o, n := d.GetChange("global_secondary_index") + ops := diffDynamoDbGSI(o.(*schema.Set).List(), n.(*schema.Set).List()) + log.Printf("[DEBUG] Updating global secondary indexes:\n%s", ops) - oldSet := o.(*schema.Set) - newSet := n.(*schema.Set) - - // Track old names so we can know which ones we need to just update based on - // capacity changes, terraform appears to only diff on the set hash, not the - // contents so we need to make sure we don't delete any indexes that we - // just want to update the capacity for - oldGsiNameSet := make(map[string]bool) - newGsiNameSet := make(map[string]bool) - - for _, gsidata := range oldSet.List() { - gsiName := gsidata.(map[string]interface{})["name"].(string) - oldGsiNameSet[gsiName] = true - } - - for _, gsidata := range newSet.List() { - gsiName := gsidata.(map[string]interface{})["name"].(string) - newGsiNameSet[gsiName] = true - } - - // First determine what's new - for _, newgsidata := range newSet.List() { - updates := []*dynamodb.GlobalSecondaryIndexUpdate{} - newGsiName := newgsidata.(map[string]interface{})["name"].(string) - if _, exists := oldGsiNameSet[newGsiName]; !exists { - attributes := []*dynamodb.AttributeDefinition{} - gsidata := newgsidata.(map[string]interface{}) - gsi := createGSIFromData(&gsidata) - log.Printf("[DEBUG] Adding GSI %s", *gsi.IndexName) - update := 
&dynamodb.GlobalSecondaryIndexUpdate{ - Create: &dynamodb.CreateGlobalSecondaryIndexAction{ - IndexName: gsi.IndexName, - KeySchema: gsi.KeySchema, - ProvisionedThroughput: gsi.ProvisionedThroughput, - Projection: gsi.Projection, - }, - } - updates = append(updates, update) - - // Hash key is required, range key isn't - hashkey_type, err := getAttributeType(d, *gsi.KeySchema[0].AttributeName) - if err != nil { - return err - } - - attributes = append(attributes, &dynamodb.AttributeDefinition{ - AttributeName: gsi.KeySchema[0].AttributeName, - AttributeType: aws.String(hashkey_type), - }) - - // If there's a range key, there will be 2 elements in KeySchema - if len(gsi.KeySchema) == 2 { - rangekey_type, err := getAttributeType(d, *gsi.KeySchema[1].AttributeName) - if err != nil { - return err - } - - attributes = append(attributes, &dynamodb.AttributeDefinition{ - AttributeName: gsi.KeySchema[1].AttributeName, - AttributeType: aws.String(rangekey_type), - }) - } - - req.AttributeDefinitions = attributes - req.GlobalSecondaryIndexUpdates = updates - _, err = dynamodbconn.UpdateTable(req) - - if err != nil { - return err - } - - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - - if err := waitForGSIToBeActive(d.Id(), *gsi.IndexName, meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB GSIT to be active: {{err}}", err) - } - - } + input := &dynamodb.UpdateTableInput{ + TableName: aws.String(d.Id()), + AttributeDefinitions: attributes, } - for _, oldgsidata := range oldSet.List() { - updates := []*dynamodb.GlobalSecondaryIndexUpdate{} - oldGsiName := oldgsidata.(map[string]interface{})["name"].(string) - if _, exists := newGsiNameSet[oldGsiName]; !exists { - gsidata := oldgsidata.(map[string]interface{}) - log.Printf("[DEBUG] Deleting GSI %s", gsidata["name"].(string)) - update := &dynamodb.GlobalSecondaryIndexUpdate{ - Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ - IndexName: aws.String(gsidata["name"].(string)), - }, - } - updates = append(updates, update) - - req.GlobalSecondaryIndexUpdates = updates - _, err := dynamodbconn.UpdateTable(req) - - if err != nil { - return err - } - - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB Table update: {{err}}", err) - } - } - } - } - - // Update any out-of-date read / write capacity - if gsiObjects, ok := d.GetOk("global_secondary_index"); ok { - gsiSet := gsiObjects.(*schema.Set) - if len(gsiSet.List()) > 0 { - log.Printf("Updating capacity as needed!") - - // We can only change throughput, but we need to make sure it's actually changed - tableDescription, err := dynamodbconn.DescribeTable(&dynamodb.DescribeTableInput{ - TableName: aws.String(d.Id()), - }) - + // Only 1 online index can be created or deleted simultaneously per table + for _, op := range ops { + input.GlobalSecondaryIndexUpdates = []*dynamodb.GlobalSecondaryIndexUpdate{op} + _, err := conn.UpdateTable(input) if err != nil { return err } - - table := tableDescription.Table - - for _, updatedgsidata := range gsiSet.List() { - updates := []*dynamodb.GlobalSecondaryIndexUpdate{} - gsidata := updatedgsidata.(map[string]interface{}) - gsiName := gsidata["name"].(string) - gsiWriteCapacity := gsidata["write_capacity"].(int) - gsiReadCapacity := gsidata["read_capacity"].(int) - - log.Printf("[DEBUG] Updating GSI %s", gsiName) - gsi, err := getGlobalSecondaryIndex(gsiName, table.GlobalSecondaryIndexes) - - 
if err != nil { - return err + if op.Create != nil { + idxName := *op.Create.IndexName + if err := waitForDynamoDbGSIToBeActive(d.Id(), idxName, conn); err != nil { + return fmt.Errorf("Error waiting for Dynamo DB GSI %q to be created: %s", idxName, err) } - - capacityUpdated := false - - if int64(gsiReadCapacity) != *gsi.ProvisionedThroughput.ReadCapacityUnits || - int64(gsiWriteCapacity) != *gsi.ProvisionedThroughput.WriteCapacityUnits { - capacityUpdated = true - } - - if capacityUpdated { - update := &dynamodb.GlobalSecondaryIndexUpdate{ - Update: &dynamodb.UpdateGlobalSecondaryIndexAction{ - IndexName: aws.String(gsidata["name"].(string)), - ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ - WriteCapacityUnits: aws.Int64(int64(gsiWriteCapacity)), - ReadCapacityUnits: aws.Int64(int64(gsiReadCapacity)), - }, - }, - } - updates = append(updates, update) - + } + if op.Update != nil { + idxName := *op.Update.IndexName + if err := waitForDynamoDbGSIToBeActive(d.Id(), idxName, conn); err != nil { + return fmt.Errorf("Error waiting for Dynamo DB GSI %q to be updated: %s", idxName, err) } - - if len(updates) > 0 { - - req := &dynamodb.UpdateTableInput{ - TableName: aws.String(d.Id()), - } - - req.GlobalSecondaryIndexUpdates = updates - - log.Printf("[DEBUG] Updating GSI read / write capacity on %s", d.Id()) - _, err := dynamodbconn.UpdateTable(req) - - if err != nil { - log.Printf("[DEBUG] Error updating table: %s", err) - return err - } - - if err := waitForGSIToBeActive(d.Id(), gsiName, meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB GSI to be active: {{err}}", err) - } + } + if op.Delete != nil { + idxName := *op.Delete.IndexName + if err := waitForDynamoDbGSIToBeDeleted(d.Id(), idxName, conn); err != nil { + return fmt.Errorf("Error waiting for Dynamo DB GSI %q to be deleted: %s", idxName, err) } } } + if err := waitForDynamoDbTableToBeActive(d.Id(), d.Timeout(schema.TimeoutUpdate), conn); err != nil { + return fmt.Errorf("Error waiting for Dynamo DB Table op: %s", err) + } } if d.HasChange("ttl") { - if err := updateTimeToLive(d, meta); err != nil { + if err := updateDynamoDbTimeToLive(d, conn); err != nil { log.Printf("[DEBUG] Error updating table TimeToLive: %s", err) return err } } - // Update tags - if err := setTagsDynamoDb(dynamodbconn, d); err != nil { - return err - } - - return resourceAwsDynamoDbTableRead(d, meta) -} - -func updateTimeToLive(d *schema.ResourceData, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - - if ttl, ok := d.GetOk("ttl"); ok { - - timeToLiveSet := ttl.(*schema.Set) - - spec := &dynamodb.TimeToLiveSpecification{} - - timeToLive := timeToLiveSet.List()[0].(map[string]interface{}) - spec.AttributeName = aws.String(timeToLive["attribute_name"].(string)) - spec.Enabled = aws.Bool(timeToLive["enabled"].(bool)) - - req := &dynamodb.UpdateTimeToLiveInput{ - TableName: aws.String(d.Id()), - TimeToLiveSpecification: spec, - } - - _, err := dynamodbconn.UpdateTimeToLive(req) - - if err != nil { - // If ttl was not set within the .tf file before and has now been added we still run this command to update - // But there has been no change so lets continue - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ValidationException" && awsErr.Message() == "TimeToLive is already disabled" { - return nil - } - log.Printf("[DEBUG] Error updating TimeToLive on table: %s", err) + if d.HasChange("tags") { + if err := setTagsDynamoDb(conn, d); err != nil { return err } - - log.Printf("[DEBUG] Updated TimeToLive on 
table") - - if err := waitForTimeToLiveUpdateToBeCompleted(d.Id(), timeToLive["enabled"].(bool), meta); err != nil { - return errwrap.Wrapf("Error waiting for Dynamo DB TimeToLive to be updated: {{err}}", err) - } } - return nil + return resourceAwsDynamoDbTableRead(d, meta) } func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - log.Printf("[DEBUG] Loading data for DynamoDB table '%s'", d.Id()) - req := &dynamodb.DescribeTableInput{ - TableName: aws.String(d.Id()), - } + conn := meta.(*AWSClient).dynamodbconn - result, err := dynamodbconn.DescribeTable(req) + result, err := conn.DescribeTable(&dynamodb.DescribeTableInput{ + TableName: aws.String(d.Id()), + }) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { + if isAWSErr(err, "ResourceNotFoundException", "") { log.Printf("[WARN] Dynamodb Table (%s) not found, error code (404)", d.Id()) d.SetId("") return nil @@ -711,444 +396,265 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro return err } - return flattenAwsDynamoDbTableResource(d, meta, result.Table) -} - -func flattenAwsDynamoDbTableResource(d *schema.ResourceData, meta interface{}, table *dynamodb.TableDescription) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - - d.Set("write_capacity", table.ProvisionedThroughput.WriteCapacityUnits) - d.Set("read_capacity", table.ProvisionedThroughput.ReadCapacityUnits) - - attributes := []interface{}{} - for _, attrdef := range table.AttributeDefinitions { - attribute := map[string]string{ - "name": *attrdef.AttributeName, - "type": *attrdef.AttributeType, - } - attributes = append(attributes, attribute) - log.Printf("[DEBUG] Added Attribute: %s", attribute["name"]) - } - - d.Set("attribute", attributes) - d.Set("name", table.TableName) - - for _, attribute := range table.KeySchema { - if *attribute.KeyType == "HASH" { - d.Set("hash_key", attribute.AttributeName) - } - - if *attribute.KeyType == "RANGE" { - d.Set("range_key", attribute.AttributeName) - } - } - - lsiList := make([]map[string]interface{}, 0, len(table.LocalSecondaryIndexes)) - for _, lsiObject := range table.LocalSecondaryIndexes { - lsi := map[string]interface{}{ - "name": *lsiObject.IndexName, - "projection_type": *lsiObject.Projection.ProjectionType, - } - - for _, attribute := range lsiObject.KeySchema { - - if *attribute.KeyType == "RANGE" { - lsi["range_key"] = *attribute.AttributeName - } - } - nkaList := make([]string, len(lsiObject.Projection.NonKeyAttributes)) - for _, nka := range lsiObject.Projection.NonKeyAttributes { - nkaList = append(nkaList, *nka) - } - lsi["non_key_attributes"] = nkaList - - lsiList = append(lsiList, lsi) - } - - err := d.Set("local_secondary_index", lsiList) - if err != nil { - return err - } - - gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes)) - for _, gsiObject := range table.GlobalSecondaryIndexes { - gsi := map[string]interface{}{ - "write_capacity": *gsiObject.ProvisionedThroughput.WriteCapacityUnits, - "read_capacity": *gsiObject.ProvisionedThroughput.ReadCapacityUnits, - "name": *gsiObject.IndexName, - } - - for _, attribute := range gsiObject.KeySchema { - if *attribute.KeyType == "HASH" { - gsi["hash_key"] = *attribute.AttributeName - } - - if *attribute.KeyType == "RANGE" { - gsi["range_key"] = *attribute.AttributeName - } - } - - gsi["projection_type"] = *(gsiObject.Projection.ProjectionType) - - nonKeyAttrs := 
make([]string, 0, len(gsiObject.Projection.NonKeyAttributes)) - for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes { - nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr) - } - gsi["non_key_attributes"] = nonKeyAttrs - - gsiList = append(gsiList, gsi) - log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"]) - } - - if table.StreamSpecification != nil { - d.Set("stream_view_type", table.StreamSpecification.StreamViewType) - d.Set("stream_enabled", table.StreamSpecification.StreamEnabled) - d.Set("stream_arn", table.LatestStreamArn) - d.Set("stream_label", table.LatestStreamLabel) - } - - err = d.Set("global_secondary_index", gsiList) + err = flattenAwsDynamoDbTableResource(d, result.Table) if err != nil { return err } - d.Set("arn", table.TableArn) - - timeToLiveReq := &dynamodb.DescribeTimeToLiveInput{ + ttlOut, err := conn.DescribeTimeToLive(&dynamodb.DescribeTimeToLiveInput{ TableName: aws.String(d.Id()), - } - timeToLiveOutput, err := dynamodbconn.DescribeTimeToLive(timeToLiveReq) + }) if err != nil { return err } - - if timeToLiveOutput.TimeToLiveDescription != nil && timeToLiveOutput.TimeToLiveDescription.AttributeName != nil { - timeToLiveList := []interface{}{ - map[string]interface{}{ - "attribute_name": *timeToLiveOutput.TimeToLiveDescription.AttributeName, - "enabled": (*timeToLiveOutput.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusEnabled), - }, - } - err := d.Set("ttl", timeToLiveList) + if ttlOut.TimeToLiveDescription != nil { + err := d.Set("ttl", flattenDynamoDbTtl(ttlOut.TimeToLiveDescription)) if err != nil { return err } - - log.Printf("[DEBUG] Loaded TimeToLive data for DynamoDB table '%s'", d.Id()) } - tags, err := readTableTags(d, meta) + tags, err := readDynamoDbTableTags(d.Get("arn").(string), conn) if err != nil { return err } - if len(tags) != 0 { - d.Set("tags", tags) - } + d.Set("tags", tags) return nil } func resourceAwsDynamoDbTableDelete(d *schema.ResourceData, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn + conn := meta.(*AWSClient).dynamodbconn log.Printf("[DEBUG] DynamoDB delete table: %s", d.Id()) - err := resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { - _, err := dynamodbconn.DeleteTable(&dynamodb.DeleteTableInput{ - TableName: aws.String(d.Id()), - }) - if err != nil { - // Table is already deleted - if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "Requested resource not found: Table: ") { - return nil - } - // This logic handles multiple scenarios in the DynamoDB API: - // 1. Updating a table immediately before deletion may return: - // ResourceInUseException: Attempt to change a resource which is still in use: Table is being updated: - // 2. 
Removing a table from a DynamoDB global table may return: - // ResourceInUseException: Attempt to change a resource which is still in use: Table is being deleted: - if isAWSErr(err, dynamodb.ErrCodeResourceInUseException, "") { - return resource.RetryableError(err) - } - // Unknown error - return resource.NonRetryableError(err) - } - - return nil + _, err := conn.DeleteTable(&dynamodb.DeleteTableInput{ + TableName: aws.String(d.Id()), }) - - // check error from retry if err != nil { + if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "Requested resource not found: Table: ") { + return nil + } return err } - log.Println("[INFO] Waiting for DynamoDB Table to be destroyed") stateConf := &resource.StateChangeConf{ Pending: []string{ dynamodb.TableStatusActive, - dynamodb.TableStatusCreating, dynamodb.TableStatusDeleting, - dynamodb.TableStatusUpdating, }, - Target: []string{}, - Refresh: resourceAwsDynamoDbTableStateRefreshFunc(d, meta), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, + Target: []string{}, + Timeout: d.Timeout(schema.TimeoutDelete), + Refresh: func() (interface{}, string, error) { + out, err := conn.DescribeTable(&dynamodb.DescribeTableInput{ + TableName: aws.String(d.Id()), + }) + if err != nil { + if isAWSErr(err, "ResourceNotFoundException", "") { + return nil, "", nil + } + + return 42, "", err + } + table := out.Table + + return table, *table.TableStatus, nil + }, } _, err = stateConf.WaitForState() return err } -func resourceAwsDynamoDbTableRetrieve(d *schema.ResourceData, meta interface{}) (*dynamodb.TableDescription, error) { - dynamodbconn := meta.(*AWSClient).dynamodbconn +func updateDynamoDbTimeToLive(d *schema.ResourceData, conn *dynamodb.DynamoDB) error { + toBeEnabled := false + attributeName := "" - input := &dynamodb.DescribeTableInput{ - TableName: aws.String(d.Id()), - } + o, n := d.GetChange("ttl") + newTtl, ok := n.(*schema.Set) + blockExists := ok && newTtl.Len() > 0 - log.Printf("[DEBUG] Retrieving DynamoDB Table: %#v", input) + if blockExists { + ttlList := newTtl.List() + ttlMap := ttlList[0].(map[string]interface{}) + attributeName = ttlMap["attribute_name"].(string) + toBeEnabled = ttlMap["enabled"].(bool) - output, err := dynamodbconn.DescribeTable(input) - if err != nil { - if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") { - return nil, nil - } - return nil, fmt.Errorf("Error retrieving DynamoDB Table: %s", err) + } else if !d.IsNewResource() { + oldTtlList := o.(*schema.Set).List() + ttlMap := oldTtlList[0].(map[string]interface{}) + attributeName = ttlMap["attribute_name"].(string) + toBeEnabled = false } - return output.Table, nil -} - -func resourceAwsDynamoDbTableStateRefreshFunc( - d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - td, err := resourceAwsDynamoDbTableRetrieve(d, meta) - + if attributeName != "" { + _, err := conn.UpdateTimeToLive(&dynamodb.UpdateTimeToLiveInput{ + TableName: aws.String(d.Id()), + TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{ + AttributeName: aws.String(attributeName), + Enabled: aws.Bool(toBeEnabled), + }, + }) if err != nil { - log.Printf("Error on retrieving DynamoDB Table when waiting: %s", err) - return nil, "", err - } - - if td == nil { - return nil, "", nil + if isAWSErr(err, "ValidationException", "TimeToLive is already disabled") { + return nil + } + return err } - if td.TableStatus != nil { - log.Printf("[DEBUG] Status for DynamoDB Table %s: %s", d.Id(), 
*td.TableStatus) + err = waitForDynamoDbTtlUpdateToBeCompleted(d.Id(), toBeEnabled, conn) + if err != nil { + return fmt.Errorf("Error waiting for Dynamo DB TimeToLive to be updated: %s", err) } - - return td, *td.TableStatus, nil } -} -func createGSIFromData(data *map[string]interface{}) dynamodb.GlobalSecondaryIndex { - - projection := &dynamodb.Projection{ - ProjectionType: aws.String((*data)["projection_type"].(string)), - } + return nil +} - if (*data)["projection_type"] == "INCLUDE" { - non_key_attributes := []*string{} - for _, attr := range (*data)["non_key_attributes"].([]interface{}) { - non_key_attributes = append(non_key_attributes, aws.String(attr.(string))) - } - projection.NonKeyAttributes = non_key_attributes +func readDynamoDbTableTags(arn string, conn *dynamodb.DynamoDB) (map[string]string, error) { + output, err := conn.ListTagsOfResource(&dynamodb.ListTagsOfResourceInput{ + ResourceArn: aws.String(arn), + }) + if err != nil { + return nil, fmt.Errorf("Error reading tags from dynamodb resource: %s", err) } - writeCapacity := (*data)["write_capacity"].(int) - readCapacity := (*data)["read_capacity"].(int) + result := tagsToMapDynamoDb(output.Tags) - key_schema := []*dynamodb.KeySchemaElement{ - { - AttributeName: aws.String((*data)["hash_key"].(string)), - KeyType: aws.String("HASH"), - }, - } + // TODO Read NextToken if available - range_key_name := (*data)["range_key"] - if range_key_name != "" { - range_key_element := &dynamodb.KeySchemaElement{ - AttributeName: aws.String(range_key_name.(string)), - KeyType: aws.String("RANGE"), - } + return result, nil +} - key_schema = append(key_schema, range_key_element) - } +// Waiters - return dynamodb.GlobalSecondaryIndex{ - IndexName: aws.String((*data)["name"].(string)), - KeySchema: key_schema, - Projection: projection, - ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ - WriteCapacityUnits: aws.Int64(int64(writeCapacity)), - ReadCapacityUnits: aws.Int64(int64(readCapacity)), +func waitForDynamoDbGSIToBeActive(tableName string, gsiName string, conn *dynamodb.DynamoDB) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + dynamodb.IndexStatusCreating, + dynamodb.IndexStatusUpdating, }, - } -} - -func getGlobalSecondaryIndex(indexName string, indexList []*dynamodb.GlobalSecondaryIndexDescription) (*dynamodb.GlobalSecondaryIndexDescription, error) { - for _, gsi := range indexList { - if *gsi.IndexName == indexName { - return gsi, nil - } - } + Target: []string{dynamodb.IndexStatusActive}, + Timeout: 10 * time.Minute, + Refresh: func() (interface{}, string, error) { + result, err := conn.DescribeTable(&dynamodb.DescribeTableInput{ + TableName: aws.String(tableName), + }) + if err != nil { + return 42, "", err + } - return &dynamodb.GlobalSecondaryIndexDescription{}, fmt.Errorf("Can't find a GSI by that name...") -} + table := result.Table -func getAttributeType(d *schema.ResourceData, attributeName string) (string, error) { - if attributedata, ok := d.GetOk("attribute"); ok { - attributeSet := attributedata.(*schema.Set) - for _, attribute := range attributeSet.List() { - attr := attribute.(map[string]interface{}) - if attr["name"] == attributeName { - return attr["type"].(string), nil + // Find index + var targetGSI *dynamodb.GlobalSecondaryIndexDescription + for _, gsi := range table.GlobalSecondaryIndexes { + if *gsi.IndexName == gsiName { + targetGSI = gsi + } } - } - } - return "", fmt.Errorf("Unable to find an attribute named %s", attributeName) -} + if targetGSI != nil { + return table, 
*targetGSI.IndexStatus, nil + } -func waitForGSIToBeActive(tableName string, gsiName string, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.DescribeTableInput{ - TableName: aws.String(tableName), + return nil, "", nil + }, } + _, err := stateConf.WaitForState() + return err +} - activeIndex := false - - for activeIndex == false { - - result, err := dynamodbconn.DescribeTable(req) - - if err != nil { - return err - } - - table := result.Table - var targetGSI *dynamodb.GlobalSecondaryIndexDescription = nil - - for _, gsi := range table.GlobalSecondaryIndexes { - if *gsi.IndexName == gsiName { - targetGSI = gsi +func waitForDynamoDbGSIToBeDeleted(tableName string, gsiName string, conn *dynamodb.DynamoDB) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + dynamodb.IndexStatusActive, + dynamodb.IndexStatusDeleting, + }, + Target: []string{}, + Timeout: 10 * time.Minute, + Refresh: func() (interface{}, string, error) { + result, err := conn.DescribeTable(&dynamodb.DescribeTableInput{ + TableName: aws.String(tableName), + }) + if err != nil { + return 42, "", err } - } - if targetGSI != nil { - activeIndex = *targetGSI.IndexStatus == "ACTIVE" + table := result.Table - if !activeIndex { - log.Printf("[DEBUG] Sleeping for 5 seconds for %s GSI to become active", gsiName) - time.Sleep(5 * time.Second) + // Find index + var targetGSI *dynamodb.GlobalSecondaryIndexDescription + for _, gsi := range table.GlobalSecondaryIndexes { + if *gsi.IndexName == gsiName { + targetGSI = gsi + } } - } else { - log.Printf("[DEBUG] GSI %s did not exist, giving up", gsiName) - break - } - } - - return nil -} + if targetGSI == nil { + return nil, "", nil + } -func waitForTableToBeActive(tableName string, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.DescribeTableInput{ - TableName: aws.String(tableName), + return targetGSI, *targetGSI.IndexStatus, nil + }, } + _, err := stateConf.WaitForState() + return err +} - activeState := false - - for activeState == false { - result, err := dynamodbconn.DescribeTable(req) - - if err != nil { - return err - } - - activeState = *result.Table.TableStatus == "ACTIVE" +func waitForDynamoDbTableToBeActive(tableName string, timeout time.Duration, conn *dynamodb.DynamoDB) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{dynamodb.TableStatusCreating, dynamodb.TableStatusUpdating}, + Target: []string{dynamodb.TableStatusActive}, + Timeout: timeout, + Refresh: func() (interface{}, string, error) { + result, err := conn.DescribeTable(&dynamodb.DescribeTableInput{ + TableName: aws.String(tableName), + }) + if err != nil { + return 42, "", err + } - // Wait for a few seconds - if !activeState { - log.Printf("[DEBUG] Sleeping for 5 seconds for table to become active") - time.Sleep(5 * time.Second) - } + return result, *result.Table.TableStatus, nil + }, } + _, err := stateConf.WaitForState() - return nil - + return err } -func waitForTimeToLiveUpdateToBeCompleted(tableName string, enabled bool, meta interface{}) error { - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.DescribeTimeToLiveInput{ - TableName: aws.String(tableName), +func waitForDynamoDbTtlUpdateToBeCompleted(tableName string, toEnable bool, conn *dynamodb.DynamoDB) error { + pending := []string{ + dynamodb.TimeToLiveStatusEnabled, + dynamodb.TimeToLiveStatusDisabling, } + target := []string{dynamodb.TimeToLiveStatusDisabled} - stateMatched := false - for stateMatched == false { 
- result, err := dynamodbconn.DescribeTimeToLive(req) - - if err != nil { - return err - } - - if enabled { - stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusEnabled - } else { - stateMatched = *result.TimeToLiveDescription.TimeToLiveStatus == dynamodb.TimeToLiveStatusDisabled - } - - // Wait for a few seconds, this may take a long time... - if !stateMatched { - log.Printf("[DEBUG] Sleeping for 5 seconds before checking TimeToLive state again") - time.Sleep(5 * time.Second) + if toEnable { + pending = []string{ + dynamodb.TimeToLiveStatusDisabled, + dynamodb.TimeToLiveStatusEnabling, } + target = []string{dynamodb.TimeToLiveStatusEnabled} } - log.Printf("[DEBUG] TimeToLive update complete") - - return nil - -} - -func createTableTags(d *schema.ResourceData, meta interface{}) error { - // DynamoDB Table has to be in the ACTIVE state in order to tag the resource - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return err - } - tags := d.Get("tags").(map[string]interface{}) - arn := d.Get("arn").(string) - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.TagResourceInput{ - ResourceArn: aws.String(arn), - Tags: tagsFromMapDynamoDb(tags), - } - _, err := dynamodbconn.TagResource(req) - if err != nil { - return fmt.Errorf("Error tagging dynamodb resource: %s", err) - } - return nil -} + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: target, + Timeout: 10 * time.Second, + Refresh: func() (interface{}, string, error) { + result, err := conn.DescribeTimeToLive(&dynamodb.DescribeTimeToLiveInput{ + TableName: aws.String(tableName), + }) + if err != nil { + return 42, "", err + } -func readTableTags(d *schema.ResourceData, meta interface{}) (map[string]string, error) { - if err := waitForTableToBeActive(d.Id(), meta); err != nil { - return nil, err - } - arn := d.Get("arn").(string) - //result := make(map[string]string) + ttlDesc := result.TimeToLiveDescription - dynamodbconn := meta.(*AWSClient).dynamodbconn - req := &dynamodb.ListTagsOfResourceInput{ - ResourceArn: aws.String(arn), + return result, *ttlDesc.TimeToLiveStatus, nil + }, } - output, err := dynamodbconn.ListTagsOfResource(req) - if err != nil { - return nil, fmt.Errorf("Error reading tags from dynamodb resource: %s", err) - } - result := tagsToMapDynamoDb(output.Tags) - // TODO Read NextToken if avail - return result, nil + _, err := stateConf.WaitForState() + return err } diff --git a/aws/resource_aws_dynamodb_table_test.go b/aws/resource_aws_dynamodb_table_test.go index d64d5686cba..bc9e40bbe6a 100644 --- a/aws/resource_aws_dynamodb_table_test.go +++ b/aws/resource_aws_dynamodb_table_test.go @@ -13,6 +13,270 @@ import ( "github.com/hashicorp/terraform/terraform" ) +func TestDiffDynamoDbGSI(t *testing.T) { + testCases := []struct { + Old []interface{} + New []interface{} + ExpectedUpdates []*dynamodb.GlobalSecondaryIndexUpdate + }{ + { // No-op + Old: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + }, + }, + New: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + }, + }, + ExpectedUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{}, + }, + + { // Creation + Old: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": 
"ALL", + }, + }, + New: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + }, + map[string]interface{}{ + "name": "att2-index", + "hash_key": "att2", + "write_capacity": 12, + "read_capacity": 11, + "projection_type": "ALL", + }, + }, + ExpectedUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{ + { + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String("att2-index"), + KeySchema: []*dynamodb.KeySchemaElement{ + { + AttributeName: aws.String("att2"), + KeyType: aws.String("HASH"), + }, + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + WriteCapacityUnits: aws.Int64(12), + ReadCapacityUnits: aws.Int64(11), + }, + Projection: &dynamodb.Projection{ + ProjectionType: aws.String("ALL"), + }, + }, + }, + }, + }, + + { // Deletion + Old: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + }, + map[string]interface{}{ + "name": "att2-index", + "hash_key": "att2", + "write_capacity": 12, + "read_capacity": 11, + "projection_type": "ALL", + }, + }, + New: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + }, + }, + ExpectedUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{ + { + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String("att2-index"), + }, + }, + }, + }, + + { // Update + Old: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + }, + }, + New: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 20, + "read_capacity": 30, + "projection_type": "ALL", + }, + }, + ExpectedUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{ + { + Update: &dynamodb.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String("att1-index"), + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + WriteCapacityUnits: aws.Int64(20), + ReadCapacityUnits: aws.Int64(30), + }, + }, + }, + }, + }, + + { // Update of non-capacity attributes + Old: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + }, + }, + New: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att-new", + "range_key": "new-range-key", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "KEYS_ONLY", + "non_key_attributes": []interface{}{"RandomAttribute"}, + }, + }, + ExpectedUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{ + { + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String("att1-index"), + }, + }, + { + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String("att1-index"), + KeySchema: []*dynamodb.KeySchemaElement{ + { + AttributeName: aws.String("att-new"), + KeyType: aws.String("HASH"), + }, + { + AttributeName: aws.String("new-range-key"), + KeyType: aws.String("RANGE"), + }, + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + WriteCapacityUnits: aws.Int64(10), + ReadCapacityUnits: aws.Int64(10), + }, + Projection: &dynamodb.Projection{ + ProjectionType: aws.String("KEYS_ONLY"), + NonKeyAttributes: aws.StringSlice([]string{"RandomAttribute"}), + }, + }, + }, + }, + }, + + { // Update 
of all attributes + Old: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att1", + "write_capacity": 10, + "read_capacity": 10, + "projection_type": "ALL", + }, + }, + New: []interface{}{ + map[string]interface{}{ + "name": "att1-index", + "hash_key": "att-new", + "range_key": "new-range-key", + "write_capacity": 12, + "read_capacity": 12, + "projection_type": "INCLUDE", + "non_key_attributes": []interface{}{"RandomAttribute"}, + }, + }, + ExpectedUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{ + { + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String("att1-index"), + }, + }, + { + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String("att1-index"), + KeySchema: []*dynamodb.KeySchemaElement{ + { + AttributeName: aws.String("att-new"), + KeyType: aws.String("HASH"), + }, + { + AttributeName: aws.String("new-range-key"), + KeyType: aws.String("RANGE"), + }, + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + WriteCapacityUnits: aws.Int64(12), + ReadCapacityUnits: aws.Int64(12), + }, + Projection: &dynamodb.Projection{ + ProjectionType: aws.String("INCLUDE"), + NonKeyAttributes: aws.StringSlice([]string{"RandomAttribute"}), + }, + }, + }, + }, + }, + } + + for i, tc := range testCases { + ops := diffDynamoDbGSI(tc.Old, tc.New) + + // Convert to strings to avoid dealing with pointers + opsS := fmt.Sprintf("%v", ops) + opsExpectedS := fmt.Sprintf("%v", tc.ExpectedUpdates) + + if opsS != opsExpectedS { + t.Fatalf("Case #%d: Given:\n%s\n\nExpected:\n%s", + i, opsS, opsExpectedS) + } + } +} + func TestAccAWSDynamoDbTable_basic(t *testing.T) { var conf dynamodb.DescribeTableOutput @@ -87,7 +351,32 @@ func TestAccAWSDynamoDbTable_tags(t *testing.T) { } // https://github.com/hashicorp/terraform/issues/13243 -func TestAccAWSDynamoDbTable_gsiUpdate(t *testing.T) { +func TestAccAWSDynamoDbTable_gsiUpdateCapacity(t *testing.T) { + var conf dynamodb.DescribeTableOutput + name := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSDynamoDbConfigGsiUpdate(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf), + ), + }, + { + Config: testAccAWSDynamoDbConfigGsiUpdatedCapacity(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf), + ), + }, + }, + }) +} + +func TestAccAWSDynamoDbTable_gsiUpdateOtherAttributes(t *testing.T) { var conf dynamodb.DescribeTableOutput name := acctest.RandString(10) @@ -103,7 +392,33 @@ func TestAccAWSDynamoDbTable_gsiUpdate(t *testing.T) { ), }, { - Config: testAccAWSDynamoDbConfigGsiUpdated(name), + Config: testAccAWSDynamoDbConfigGsiUpdatedOtherAttributes(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf), + ), + }, + }, + }) +} + +// https://github.com/terraform-providers/terraform-provider-aws/issues/566 +func TestAccAWSDynamoDbTable_gsiUpdateNonKeyAttributes(t *testing.T) { + var conf dynamodb.DescribeTableOutput + name := acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccAWSDynamoDbConfigGsiUpdatedOtherAttributes(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf), + ), + }, + { + Config: testAccAWSDynamoDbConfigGsiUpdatedNonKeyAttributes(name), Check: resource.ComposeTestCheckFunc( testAccCheckInitialAWSDynamoDbTableExists("aws_dynamodb_table.test", &conf), ), @@ -687,7 +1002,7 @@ resource "aws_dynamodb_table" "test" { `, name) } -func testAccAWSDynamoDbConfigGsiUpdated(name string) string { +func testAccAWSDynamoDbConfigGsiUpdatedCapacity(name string) string { return fmt.Sprintf(` variable "capacity" { default = 20 @@ -746,6 +1061,140 @@ resource "aws_dynamodb_table" "test" { `, name) } +func testAccAWSDynamoDbConfigGsiUpdatedOtherAttributes(name string) string { + return fmt.Sprintf(` +variable "capacity" { + default = 10 +} + +resource "aws_dynamodb_table" "test" { + name = "tf-acc-test-%s" + read_capacity = "${var.capacity}" + write_capacity = "${var.capacity}" + hash_key = "id" + + attribute { + name = "id" + type = "S" + } + + attribute { + name = "att1" + type = "S" + } + + attribute { + name = "att2" + type = "S" + } + + attribute { + name = "att3" + type = "S" + } + + attribute { + name = "att4" + type = "S" + } + + global_secondary_index { + name = "att1-index" + hash_key = "att1" + write_capacity = "${var.capacity}" + read_capacity = "${var.capacity}" + projection_type = "ALL" + } + + global_secondary_index { + name = "att2-index" + hash_key = "att4" + range_key = "att2" + write_capacity = "${var.capacity}" + read_capacity = "${var.capacity}" + projection_type = "ALL" + } + + global_secondary_index { + name = "att3-index" + hash_key = "att3" + range_key = "att4" + write_capacity = "${var.capacity}" + read_capacity = "${var.capacity}" + projection_type = "INCLUDE" + non_key_attributes = ["RandomAttribute"] + } +} +`, name) +} + +func testAccAWSDynamoDbConfigGsiUpdatedNonKeyAttributes(name string) string { + return fmt.Sprintf(` +variable "capacity" { + default = 10 +} + +resource "aws_dynamodb_table" "test" { + name = "tf-acc-test-%s" + read_capacity = "${var.capacity}" + write_capacity = "${var.capacity}" + hash_key = "id" + + attribute { + name = "id" + type = "S" + } + + attribute { + name = "att1" + type = "S" + } + + attribute { + name = "att2" + type = "S" + } + + attribute { + name = "att3" + type = "S" + } + + attribute { + name = "att4" + type = "S" + } + + global_secondary_index { + name = "att1-index" + hash_key = "att1" + write_capacity = "${var.capacity}" + read_capacity = "${var.capacity}" + projection_type = "ALL" + } + + global_secondary_index { + name = "att2-index" + hash_key = "att4" + range_key = "att2" + write_capacity = "${var.capacity}" + read_capacity = "${var.capacity}" + projection_type = "ALL" + } + + global_secondary_index { + name = "att3-index" + hash_key = "att3" + range_key = "att4" + write_capacity = "${var.capacity}" + read_capacity = "${var.capacity}" + projection_type = "INCLUDE" + non_key_attributes = ["RandomAttribute", "AnotherAttribute"] + } +} +`, name) +} + func testAccAWSDynamoDbConfigAddTimeToLive(rName string) string { return fmt.Sprintf(` resource "aws_dynamodb_table" "basic-dynamodb-table" { diff --git a/aws/structure.go b/aws/structure.go index bc99f8bed65..402632dca88 100644 --- a/aws/structure.go +++ b/aws/structure.go @@ -19,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" "github.com/aws/aws-sdk-go/service/configservice" "github.com/aws/aws-sdk-go/service/directoryservice" + 
"github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ecs" "github.com/aws/aws-sdk-go/service/elasticache" @@ -3074,3 +3075,303 @@ func flattenMqBrokerInstances(instances []*mq.BrokerInstance) []interface{} { return l } + +func diffDynamoDbGSI(oldGsi, newGsi []interface{}) (ops []*dynamodb.GlobalSecondaryIndexUpdate) { + // Transform slices into maps + oldGsis := make(map[string]interface{}) + for _, gsidata := range oldGsi { + m := gsidata.(map[string]interface{}) + oldGsis[m["name"].(string)] = m + } + newGsis := make(map[string]interface{}) + for _, gsidata := range newGsi { + m := gsidata.(map[string]interface{}) + newGsis[m["name"].(string)] = m + } + + for _, data := range newGsi { + newMap := data.(map[string]interface{}) + newName := newMap["name"].(string) + + if _, exists := oldGsis[newName]; !exists { + m := data.(map[string]interface{}) + idxName := m["name"].(string) + + ops = append(ops, &dynamodb.GlobalSecondaryIndexUpdate{ + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String(idxName), + KeySchema: expandDynamoDbKeySchema(m), + ProvisionedThroughput: expandDynamoDbProvisionedThroughput(m), + Projection: expandDynamoDbProjection(m), + }, + }) + } + } + + for _, data := range oldGsi { + oldMap := data.(map[string]interface{}) + oldName := oldMap["name"].(string) + + newData, exists := newGsis[oldName] + if exists { + newMap := newData.(map[string]interface{}) + idxName := newMap["name"].(string) + + oldWriteCapacity, oldReadCapacity := oldMap["write_capacity"].(int), oldMap["read_capacity"].(int) + newWriteCapacity, newReadCapacity := newMap["write_capacity"].(int), newMap["read_capacity"].(int) + capacityChanged := (oldWriteCapacity != newWriteCapacity || oldReadCapacity != newReadCapacity) + + oldAttributes := stripCapacityAttributes(oldMap) + newAttributes := stripCapacityAttributes(newMap) + otherAttributesChanged := !reflect.DeepEqual(oldAttributes, newAttributes) + + if capacityChanged && !otherAttributesChanged { + update := &dynamodb.GlobalSecondaryIndexUpdate{ + Update: &dynamodb.UpdateGlobalSecondaryIndexAction{ + IndexName: aws.String(idxName), + ProvisionedThroughput: expandDynamoDbProvisionedThroughput(newMap), + }, + } + ops = append(ops, update) + } else if otherAttributesChanged { + // Other attributes cannot be updated + ops = append(ops, &dynamodb.GlobalSecondaryIndexUpdate{ + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String(idxName), + }, + }) + + ops = append(ops, &dynamodb.GlobalSecondaryIndexUpdate{ + Create: &dynamodb.CreateGlobalSecondaryIndexAction{ + IndexName: aws.String(idxName), + KeySchema: expandDynamoDbKeySchema(newMap), + ProvisionedThroughput: expandDynamoDbProvisionedThroughput(newMap), + Projection: expandDynamoDbProjection(newMap), + }, + }) + } + } else { + idxName := oldName + ops = append(ops, &dynamodb.GlobalSecondaryIndexUpdate{ + Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{ + IndexName: aws.String(idxName), + }, + }) + } + } + return +} + +func stripCapacityAttributes(in map[string]interface{}) map[string]interface{} { + m := map[string]interface{}{ + "hash_key": in["hash_key"].(string), + } + + if v, ok := in["range_key"].(string); ok && v != "" { + m["range_key"] = v + } + if v, ok := in["projection_type"].(string); ok && v != "" { + m["projection_type"] = v + } + if v, ok := in["non_key_attributes"].([]interface{}); ok && len(v) > 0 { + m["non_key_attributes"] = v + } + + return m +} + +// Expanders + 
flatteners
+
+func flattenDynamoDbTtl(ttlDesc *dynamodb.TimeToLiveDescription) []interface{} {
+	m := map[string]interface{}{}
+	if ttlDesc.AttributeName != nil {
+		m["attribute_name"] = *ttlDesc.AttributeName
+		if ttlDesc.TimeToLiveStatus != nil {
+			m["enabled"] = (*ttlDesc.TimeToLiveStatus == dynamodb.TimeToLiveStatusEnabled)
+		}
+	}
+	if len(m) > 0 {
+		return []interface{}{m}
+	}
+
+	return []interface{}{}
+}
+
+func flattenAwsDynamoDbTableResource(d *schema.ResourceData, table *dynamodb.TableDescription) error {
+	d.Set("write_capacity", table.ProvisionedThroughput.WriteCapacityUnits)
+	d.Set("read_capacity", table.ProvisionedThroughput.ReadCapacityUnits)
+
+	attributes := []interface{}{}
+	for _, attrdef := range table.AttributeDefinitions {
+		attribute := map[string]string{
+			"name": *attrdef.AttributeName,
+			"type": *attrdef.AttributeType,
+		}
+		attributes = append(attributes, attribute)
+	}
+
+	d.Set("attribute", attributes)
+	d.Set("name", table.TableName)
+
+	for _, attribute := range table.KeySchema {
+		if *attribute.KeyType == "HASH" {
+			d.Set("hash_key", attribute.AttributeName)
+		}
+
+		if *attribute.KeyType == "RANGE" {
+			d.Set("range_key", attribute.AttributeName)
+		}
+	}
+
+	lsiList := make([]map[string]interface{}, 0, len(table.LocalSecondaryIndexes))
+	for _, lsiObject := range table.LocalSecondaryIndexes {
+		lsi := map[string]interface{}{
+			"name":            *lsiObject.IndexName,
+			"projection_type": *lsiObject.Projection.ProjectionType,
+		}
+
+		for _, attribute := range lsiObject.KeySchema {
+			if *attribute.KeyType == "RANGE" {
+				lsi["range_key"] = *attribute.AttributeName
+			}
+		}
+
+		nkaList := make([]string, 0, len(lsiObject.Projection.NonKeyAttributes))
+		for _, nka := range lsiObject.Projection.NonKeyAttributes {
+			nkaList = append(nkaList, *nka)
+		}
+		lsi["non_key_attributes"] = nkaList
+
+		lsiList = append(lsiList, lsi)
+	}
+
+	err := d.Set("local_secondary_index", lsiList)
+	if err != nil {
+		return err
+	}
+
+	gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes))
+	for _, gsiObject := range table.GlobalSecondaryIndexes {
+		gsi := map[string]interface{}{
+			"write_capacity": *gsiObject.ProvisionedThroughput.WriteCapacityUnits,
+			"read_capacity":  *gsiObject.ProvisionedThroughput.ReadCapacityUnits,
+			"name":           *gsiObject.IndexName,
+		}
+
+		for _, attribute := range gsiObject.KeySchema {
+			if *attribute.KeyType == "HASH" {
+				gsi["hash_key"] = *attribute.AttributeName
+			}
+
+			if *attribute.KeyType == "RANGE" {
+				gsi["range_key"] = *attribute.AttributeName
+			}
+		}
+
+		gsi["projection_type"] = *gsiObject.Projection.ProjectionType
+
+		nonKeyAttrs := make([]string, 0, len(gsiObject.Projection.NonKeyAttributes))
+		for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes {
+			nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr)
+		}
+		gsi["non_key_attributes"] = nonKeyAttrs
+
+		gsiList = append(gsiList, gsi)
+	}
+
+	if table.StreamSpecification != nil {
+		d.Set("stream_view_type", table.StreamSpecification.StreamViewType)
+		d.Set("stream_enabled", table.StreamSpecification.StreamEnabled)
+		d.Set("stream_arn", table.LatestStreamArn)
+		d.Set("stream_label", table.LatestStreamLabel)
+	}
+
+	err = d.Set("global_secondary_index", gsiList)
+	if err != nil {
+		return err
+	}
+
+	d.Set("arn", table.TableArn)
+
+	return nil
+}
+
+func expandDynamoDbAttributes(cfg []interface{}) []*dynamodb.AttributeDefinition {
+	attributes := make([]*dynamodb.AttributeDefinition, len(cfg), len(cfg))
+	for i, attribute := range cfg {
+		attr := attribute.(map[string]interface{})
+
attributes[i] = &dynamodb.AttributeDefinition{
+			AttributeName: aws.String(attr["name"].(string)),
+			AttributeType: aws.String(attr["type"].(string)),
+		}
+	}
+	return attributes
+}
+
+// TODO: Get rid of keySchemaM - the user should just explicitly define
+// this in the config; we shouldn't be setting it magically like this.
+// Removing it would require a config change though, i.e. a breaking change. :/
+func expandDynamoDbLocalSecondaryIndexes(cfg []interface{}, keySchemaM map[string]interface{}) []*dynamodb.LocalSecondaryIndex {
+	indexes := make([]*dynamodb.LocalSecondaryIndex, len(cfg), len(cfg))
+	for i, lsi := range cfg {
+		m := lsi.(map[string]interface{})
+		idxName := m["name"].(string)
+
+		m["hash_key"] = keySchemaM["hash_key"]
+
+		indexes[i] = &dynamodb.LocalSecondaryIndex{
+			IndexName:  aws.String(idxName),
+			KeySchema:  expandDynamoDbKeySchema(m),
+			Projection: expandDynamoDbProjection(m),
+		}
+	}
+	return indexes
+}
+
+func expandDynamoDbGlobalSecondaryIndex(data map[string]interface{}) *dynamodb.GlobalSecondaryIndex {
+	return &dynamodb.GlobalSecondaryIndex{
+		IndexName:             aws.String(data["name"].(string)),
+		KeySchema:             expandDynamoDbKeySchema(data),
+		Projection:            expandDynamoDbProjection(data),
+		ProvisionedThroughput: expandDynamoDbProvisionedThroughput(data),
+	}
+}
+
+func expandDynamoDbProvisionedThroughput(data map[string]interface{}) *dynamodb.ProvisionedThroughput {
+	return &dynamodb.ProvisionedThroughput{
+		WriteCapacityUnits: aws.Int64(int64(data["write_capacity"].(int))),
+		ReadCapacityUnits:  aws.Int64(int64(data["read_capacity"].(int))),
+	}
+}
+
+func expandDynamoDbProjection(data map[string]interface{}) *dynamodb.Projection {
+	projection := &dynamodb.Projection{
+		ProjectionType: aws.String(data["projection_type"].(string)),
+	}
+
+	if v, ok := data["non_key_attributes"].([]interface{}); ok && len(v) > 0 {
+		projection.NonKeyAttributes = expandStringList(v)
+	}
+
+	return projection
+}
+
+func expandDynamoDbKeySchema(data map[string]interface{}) []*dynamodb.KeySchemaElement {
+	keySchema := []*dynamodb.KeySchemaElement{}
+
+	if v, ok := data["hash_key"]; ok && v != nil && v != "" {
+		keySchema = append(keySchema, &dynamodb.KeySchemaElement{
+			AttributeName: aws.String(v.(string)),
+			KeyType:       aws.String("HASH"),
+		})
+	}
+
+	if v, ok := data["range_key"]; ok && v != nil && v != "" {
+		keySchema = append(keySchema, &dynamodb.KeySchemaElement{
+			AttributeName: aws.String(v.(string)),
+			KeyType:       aws.String("RANGE"),
+		})
+	}
+
+	return keySchema
+}
diff --git a/aws/tags.go b/aws/tags.go
index 46438c0fd4a..7a4bfdef174 100644
--- a/aws/tags.go
+++ b/aws/tags.go
@@ -333,53 +333,49 @@ func tagsFromMapDynamoDb(m map[string]interface{}) []*dynamodb.Tag {
 // method from the ec2 tag resource handling. Also the `UntagResource` method
 // for dynamoDB only requires a list of tag keys, instead of the full map of keys.
func setTagsDynamoDb(conn *dynamodb.DynamoDB, d *schema.ResourceData) error { - if d.HasChange("tags") { - arn := d.Get("arn").(string) - oraw, nraw := d.GetChange("tags") - o := oraw.(map[string]interface{}) - n := nraw.(map[string]interface{}) - create, remove := diffTagsDynamoDb(tagsFromMapDynamoDb(o), tagsFromMapDynamoDb(n)) - - // Set tags - if len(remove) > 0 { - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) - _, err := conn.UntagResource(&dynamodb.UntagResourceInput{ - ResourceArn: aws.String(arn), - TagKeys: remove, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && strings.Contains(ec2err.Code(), "ResourceNotFoundException") { - return resource.RetryableError(err) // retry - } - return resource.NonRetryableError(err) - } - return nil + arn := d.Get("arn").(string) + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsDynamoDb(tagsFromMapDynamoDb(o), tagsFromMapDynamoDb(n)) + + // Set tags + if len(remove) > 0 { + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) + _, err := conn.UntagResource(&dynamodb.UntagResourceInput{ + ResourceArn: aws.String(arn), + TagKeys: remove, }) if err != nil { - return err + if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) } + return nil + }) + if err != nil { + return err } - if len(create) > 0 { - err := resource.Retry(2*time.Minute, func() *resource.RetryError { - log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) - _, err := conn.TagResource(&dynamodb.TagResourceInput{ - ResourceArn: aws.String(arn), - Tags: create, - }) - if err != nil { - ec2err, ok := err.(awserr.Error) - if ok && strings.Contains(ec2err.Code(), "ResourceNotFoundException") { - return resource.RetryableError(err) // retry - } - return resource.NonRetryableError(err) - } - return nil + } + if len(create) > 0 { + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) + _, err := conn.TagResource(&dynamodb.TagResourceInput{ + ResourceArn: aws.String(arn), + Tags: create, }) if err != nil { - return err + if isAWSErr(err, dynamodb.ErrCodeResourceNotFoundException, "") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) } + return nil + }) + if err != nil { + return err } }
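The acceptance configs earlier in this patch exercise the two GSI update paths (capacity-only changes versus changes to other attributes) end to end. As a quicker illustration of what diffDynamoDbGSI returns in those two cases, here is a minimal unit-test sketch. It is not part of this patch; it assumes it would live next to the helpers in the aws package (e.g. aws/structure_test.go), and the index name, attribute names and capacities are made up for the example.

package aws

import "testing"

func TestDiffDynamoDbGSI(t *testing.T) {
	oldGsi := []interface{}{
		map[string]interface{}{
			"name":            "att1-index",
			"hash_key":        "att1",
			"write_capacity":  10,
			"read_capacity":   10,
			"projection_type": "ALL",
		},
	}

	// A capacity-only change should translate into a single Update action.
	newGsi := []interface{}{
		map[string]interface{}{
			"name":            "att1-index",
			"hash_key":        "att1",
			"write_capacity":  20,
			"read_capacity":   20,
			"projection_type": "ALL",
		},
	}
	ops := diffDynamoDbGSI(oldGsi, newGsi)
	if len(ops) != 1 || ops[0].Update == nil {
		t.Fatalf("expected a single Update operation, got %#v", ops)
	}
	if *ops[0].Update.ProvisionedThroughput.WriteCapacityUnits != 20 {
		t.Fatalf("expected write capacity of 20, got %d",
			*ops[0].Update.ProvisionedThroughput.WriteCapacityUnits)
	}

	// Changing anything other than capacity (e.g. the projection) cannot be
	// applied in place, so the index is deleted and recreated.
	newGsi = []interface{}{
		map[string]interface{}{
			"name":            "att1-index",
			"hash_key":        "att1",
			"write_capacity":  10,
			"read_capacity":   10,
			"projection_type": "KEYS_ONLY",
		},
	}
	ops = diffDynamoDbGSI(oldGsi, newGsi)
	if len(ops) != 2 || ops[0].Delete == nil || ops[1].Create == nil {
		t.Fatalf("expected Delete followed by Create, got %#v", ops)
	}
}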