diff --git a/.changelog/19354.txt b/.changelog/19354.txt new file mode 100644 index 00000000000..41e6864c406 --- /dev/null +++ b/.changelog/19354.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_timestreamwrite_table +``` \ No newline at end of file diff --git a/aws/provider.go b/aws/provider.go index 607f5e3f97c..9cb1937d824 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -1059,6 +1059,7 @@ func Provider() *schema.Provider { "aws_swf_domain": resourceAwsSwfDomain(), "aws_synthetics_canary": resourceAwsSyntheticsCanary(), "aws_timestreamwrite_database": resourceAwsTimestreamWriteDatabase(), + "aws_timestreamwrite_table": resourceAwsTimestreamWriteTable(), "aws_transfer_server": resourceAwsTransferServer(), "aws_transfer_ssh_key": resourceAwsTransferSshKey(), "aws_transfer_user": resourceAwsTransferUser(), diff --git a/aws/resource_aws_timestreamwrite_database_test.go b/aws/resource_aws_timestreamwrite_database_test.go index 60b2f1f7efb..ebb0e5d4ac4 100644 --- a/aws/resource_aws_timestreamwrite_database_test.go +++ b/aws/resource_aws_timestreamwrite_database_test.go @@ -19,8 +19,9 @@ import ( func init() { resource.AddTestSweepers("aws_timestreamwrite_database", &resource.Sweeper{ - Name: "aws_timestreamwrite_database", - F: testSweepTimestreamWriteDatabases, + Name: "aws_timestreamwrite_database", + F: testSweepTimestreamWriteDatabases, + Dependencies: []string{"aws_timestreamwrite_table"}, }) } diff --git a/aws/resource_aws_timestreamwrite_table.go b/aws/resource_aws_timestreamwrite_table.go new file mode 100644 index 00000000000..2e4dd7293b8 --- /dev/null +++ b/aws/resource_aws_timestreamwrite_table.go @@ -0,0 +1,285 @@ +package aws + +import ( + "context" + "fmt" + "log" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/timestreamwrite" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func resourceAwsTimestreamWriteTable() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceAwsTimestreamWriteTableCreate, + ReadWithoutTimeout: resourceAwsTimestreamWriteTableRead, + UpdateWithoutTimeout: resourceAwsTimestreamWriteTableUpdate, + DeleteWithoutTimeout: resourceAwsTimestreamWriteTableDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "database_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`), "must only include alphanumeric, underscore, period, or hyphen characters"), + ), + }, + + "retention_properties": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "magnetic_store_retention_period_in_days": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 73000), + }, + + "memory_store_retention_period_in_hours": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 8766), + }, + }, + }, + }, + + "table_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.All( + validation.StringLenBetween(3, 64), + validation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`), "must only include alphanumeric, underscore, period, or hyphen characters"), + ), + }, + + "tags": tagsSchema(), + + "tags_all": tagsSchemaComputed(), + }, + + CustomizeDiff: SetTagsDiff, + } +} + +func resourceAwsTimestreamWriteTableCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics {
+	conn := meta.(*AWSClient).timestreamwriteconn
+	defaultTagsConfig := meta.(*AWSClient).DefaultTagsConfig
+	tags := defaultTagsConfig.MergeTags(keyvaluetags.New(d.Get("tags").(map[string]interface{})))
+
+	tableName := d.Get("table_name").(string)
+	input := &timestreamwrite.CreateTableInput{
+		DatabaseName: aws.String(d.Get("database_name").(string)),
+		TableName:    aws.String(tableName),
+	}
+
+	if v, ok := d.GetOk("retention_properties"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
+		input.RetentionProperties = expandTimestreamWriteRetentionProperties(v.([]interface{}))
+	}
+
+	if len(tags) > 0 {
+		input.Tags = tags.IgnoreAws().TimestreamwriteTags()
+	}
+
+	output, err := conn.CreateTableWithContext(ctx, input)
+
+	if err != nil {
+		return diag.FromErr(fmt.Errorf("error creating Timestream Table (%s): %w", tableName, err))
+	}
+
+	if output == nil || output.Table == nil {
+		return diag.FromErr(fmt.Errorf("error creating Timestream Table (%s): empty output", tableName))
+	}
+
+	// ID format is table_name:database_name; see resourceAwsTimestreamWriteTableParseId.
+	d.SetId(fmt.Sprintf("%s:%s", aws.StringValue(output.Table.TableName), aws.StringValue(output.Table.DatabaseName)))
+
+	return resourceAwsTimestreamWriteTableRead(ctx, d, meta)
+}
+
+func resourceAwsTimestreamWriteTableRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*AWSClient).timestreamwriteconn
+	defaultTagsConfig := meta.(*AWSClient).DefaultTagsConfig
+	ignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig
+
+	tableName, databaseName, err := resourceAwsTimestreamWriteTableParseId(d.Id())
+
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	input := &timestreamwrite.DescribeTableInput{
+		DatabaseName: aws.String(databaseName),
+		TableName:    aws.String(tableName),
+	}
+
+	output, err := conn.DescribeTableWithContext(ctx, input)
+
+	if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, timestreamwrite.ErrCodeResourceNotFoundException) {
+		log.Printf("[WARN] Timestream Table %s not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	// Surface non-NotFound errors directly; without this check a Describe
+	// failure would fall through and be misreported as "empty output".
+	if err != nil {
+		return diag.FromErr(fmt.Errorf("error reading Timestream Table (%s): %w", d.Id(), err))
+	}
+
+	if output == nil || output.Table == nil {
+		return diag.FromErr(fmt.Errorf("error reading Timestream Table (%s): empty output", d.Id()))
+	}
+
+	table := output.Table
+	arn := aws.StringValue(table.Arn)
+
+	d.Set("arn", arn)
+	d.Set("database_name", table.DatabaseName)
+
+	if err := d.Set("retention_properties", flattenTimestreamWriteRetentionProperties(table.RetentionProperties)); err != nil {
+		return diag.FromErr(fmt.Errorf("error setting retention_properties: %w", err))
+	}
+
+	d.Set("table_name", table.TableName)
+
+	tags, err := keyvaluetags.TimestreamwriteListTags(conn, arn)
+
+	if err != nil {
+		return diag.FromErr(fmt.Errorf("error listing tags for Timestream Table (%s): %w", arn, err))
+	}
+
+	tags = tags.IgnoreAws().IgnoreConfig(ignoreTagsConfig)
+
+	//lintignore:AWSR002
+	if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil {
+		return diag.FromErr(fmt.Errorf("error setting tags: %w", err))
+	}
+
+	if err := d.Set("tags_all", tags.Map()); err != nil {
+		return diag.FromErr(fmt.Errorf("error setting tags_all: %w", err))
+	}
+
+	return nil
+}
+
+func resourceAwsTimestreamWriteTableUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*AWSClient).timestreamwriteconn
+
+	if d.HasChange("retention_properties") {
+		tableName, databaseName, err := resourceAwsTimestreamWriteTableParseId(d.Id())
+
+		if err != nil {
+			return diag.FromErr(err)
+		}
+
+		input := &timestreamwrite.UpdateTableInput{
+			DatabaseName:        aws.String(databaseName),
+			RetentionProperties: expandTimestreamWriteRetentionProperties(d.Get("retention_properties").([]interface{})),
+			TableName:           aws.String(tableName),
+		}
+
+		_, err = conn.UpdateTableWithContext(ctx, input)
+
+		if err != nil {
+			return diag.FromErr(fmt.Errorf("error updating Timestream Table (%s): %w", d.Id(), err))
+		}
+	}
+
+	if d.HasChange("tags_all") {
+		o, n := d.GetChange("tags_all")
+
+		if err := 
keyvaluetags.TimestreamwriteUpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
+			return diag.FromErr(fmt.Errorf("error updating Timestream Table (%s) tags: %w", d.Get("arn").(string), err))
+		}
+	}
+
+	return resourceAwsTimestreamWriteTableRead(ctx, d, meta)
+}
+
+func resourceAwsTimestreamWriteTableDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*AWSClient).timestreamwriteconn
+
+	tableName, databaseName, err := resourceAwsTimestreamWriteTableParseId(d.Id())
+
+	if err != nil {
+		return diag.FromErr(err)
+	}
+
+	input := &timestreamwrite.DeleteTableInput{
+		DatabaseName: aws.String(databaseName),
+		TableName:    aws.String(tableName),
+	}
+
+	_, err = conn.DeleteTableWithContext(ctx, input)
+
+	// Already gone is success for delete.
+	if tfawserr.ErrCodeEquals(err, timestreamwrite.ErrCodeResourceNotFoundException) {
+		return nil
+	}
+
+	if err != nil {
+		return diag.FromErr(fmt.Errorf("error deleting Timestream Table (%s): %w", d.Id(), err))
+	}
+
+	return nil
+}
+
+// expandTimestreamWriteRetentionProperties converts the single-element
+// retention_properties config list into the SDK struct; nil when unset.
+func expandTimestreamWriteRetentionProperties(l []interface{}) *timestreamwrite.RetentionProperties {
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+
+	tfMap, ok := l[0].(map[string]interface{})
+
+	if !ok {
+		return nil
+	}
+
+	rp := &timestreamwrite.RetentionProperties{}
+
+	if v, ok := tfMap["magnetic_store_retention_period_in_days"].(int); ok {
+		rp.MagneticStoreRetentionPeriodInDays = aws.Int64(int64(v))
+	}
+
+	if v, ok := tfMap["memory_store_retention_period_in_hours"].(int); ok {
+		rp.MemoryStoreRetentionPeriodInHours = aws.Int64(int64(v))
+	}
+
+	return rp
+}
+
+// flattenTimestreamWriteRetentionProperties is the inverse of the expand
+// helper; returns an empty list when the API struct is nil.
+func flattenTimestreamWriteRetentionProperties(rp *timestreamwrite.RetentionProperties) []interface{} {
+	if rp == nil {
+		return []interface{}{}
+	}
+
+	m := map[string]interface{}{
+		"magnetic_store_retention_period_in_days": aws.Int64Value(rp.MagneticStoreRetentionPeriodInDays),
+		"memory_store_retention_period_in_hours":  aws.Int64Value(rp.MemoryStoreRetentionPeriodInHours),
+	}
+
+	return []interface{}{m}
+}
+
+// resourceAwsTimestreamWriteTableParseId splits a table_name:database_name
+// resource ID into its two components.
+func resourceAwsTimestreamWriteTableParseId(id string) (string, string, error) {
+	idParts := strings.SplitN(id, ":", 2)
+	if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" {
+		return "", "", fmt.Errorf("unexpected format of ID (%s), expected table_name:database_name", id)
+	}
+	return idParts[0], idParts[1], nil
+}
diff --git a/aws/resource_aws_timestreamwrite_table_test.go b/aws/resource_aws_timestreamwrite_table_test.go
new file mode 100644
index 00000000000..753c2a31ea3
--- /dev/null
+++ b/aws/resource_aws_timestreamwrite_table_test.go
@@ -0,0 +1,377 @@
+package aws
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/timestreamwrite"
+	"github.com/hashicorp/aws-sdk-go-base/tfawserr"
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+)
+
+func init() {
+	resource.AddTestSweepers("aws_timestreamwrite_table", &resource.Sweeper{
+		Name: "aws_timestreamwrite_table",
+		F:    testSweepTimestreamWriteTables,
+	})
+}
+
+func testSweepTimestreamWriteTables(region string) error {
+	client, err := sharedClientForRegion(region)
+	if err != nil {
+		return fmt.Errorf("error getting client: %s", err)
+	}
+	conn := client.(*AWSClient).timestreamwriteconn
+	ctx := context.Background()
+
+	var sweeperErrs *multierror.Error
+
+	input := &timestreamwrite.ListTablesInput{}
+
+	err = conn.ListTablesPagesWithContext(ctx, input, func(page *timestreamwrite.ListTablesOutput, lastPage bool) bool {
+		if page == nil {
+			return !lastPage
+		}
+
+		for _, table := range page.Tables {
+			if table == nil {
+				continue
+			}
+
+			tableName := aws.StringValue(table.TableName)
+			// Read the database name from DatabaseName, not TableName,
+			// so the composed ID and log messages are correct.
+			dbName := aws.StringValue(table.DatabaseName)
+
+			log.Printf("[INFO] Deleting Timestream Table (%s) from Database (%s)", 
tableName, dbName) + r := resourceAwsTimestreamWriteTable() + d := r.Data(nil) + d.SetId(fmt.Sprintf("%s:%s", tableName, dbName)) + + diags := r.DeleteWithoutTimeout(ctx, d, client) + + if diags != nil && diags.HasError() { + for _, d := range diags { + if d.Severity == diag.Error { + sweeperErr := fmt.Errorf("error deleting Timestream Table (%s): %s", dbName, d.Summary) + log.Printf("[ERROR] %s", sweeperErr) + sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) + } + } + } + } + + return !lastPage + }) + + if testSweepSkipSweepError(err) { + log.Printf("[WARN] Skipping Timestream Table sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors + } + + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing Timestream Tables: %w", err)) + } + + return sweeperErrs.ErrorOrNil() +} + +func TestAccAWSTimestreamWriteTable_basic(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_timestreamwrite_table.test" + dbResourceName := "aws_timestreamwrite_database.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSTimestreamWrite(t) }, + ErrorCheck: testAccErrorCheck(t, timestreamwrite.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSTimestreamWriteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSTimestreamWriteTableConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSTimestreamWriteTableExists(resourceName), + testAccCheckResourceAttrRegionalARN(resourceName, "arn", "timestream", fmt.Sprintf("database/%[1]s/table/%[1]s", rName)), + resource.TestCheckResourceAttrPair(resourceName, "database_name", dbResourceName, "database_name"), + resource.TestCheckResourceAttr(resourceName, "retention_properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "table_name", rName), + 
resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccAWSTimestreamWriteTable_disappears(t *testing.T) { + resourceName := "aws_timestreamwrite_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSTimestreamWrite(t) }, + ErrorCheck: testAccErrorCheck(t, timestreamwrite.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSTimestreamWriteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSTimestreamWriteTableConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSTimestreamWriteTableExists(resourceName), + testAccCheckResourceDisappears(testAccProvider, resourceAwsTimestreamWriteTable(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccAWSTimestreamWriteTable_RetentionProperties(t *testing.T) { + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_timestreamwrite_table.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSTimestreamWrite(t) }, + ErrorCheck: testAccErrorCheck(t, timestreamwrite.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSTimestreamWriteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSTimestreamWriteTableConfigRetentionProperties(rName, 30, 120), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSTimestreamWriteTableExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "retention_properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "retention_properties.0.magnetic_store_retention_period_in_days", "30"), + resource.TestCheckResourceAttr(resourceName, "retention_properties.0.memory_store_retention_period_in_hours", "120"), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSTimestreamWriteTableConfigRetentionProperties(rName, 300, 7), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSTimestreamWriteTableExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "retention_properties.#", "1"), + resource.TestCheckResourceAttr(resourceName, "retention_properties.0.magnetic_store_retention_period_in_days", "300"), + resource.TestCheckResourceAttr(resourceName, "retention_properties.0.memory_store_retention_period_in_hours", "7"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSTimestreamWriteTableConfigBasic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSTimestreamWriteTableExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "retention_properties.#", "1"), + ), + }, + }, + }) +} + +func TestAccAWSTimestreamWriteTable_Tags(t *testing.T) { + resourceName := "aws_timestreamwrite_table.test" + rName := acctest.RandomWithPrefix("tf-acc-test") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSTimestreamWrite(t) }, + ErrorCheck: testAccErrorCheck(t, timestreamwrite.EndpointsID), + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSTimestreamWriteTableDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSTimestreamWriteTableConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSTimestreamWriteTableExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + resource.TestCheckResourceAttr(resourceName, "tags_all.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags_all.key1", "value1"), + ), + }, + { + Config: testAccAWSTimestreamWriteTableConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: 
resource.ComposeTestCheckFunc(
+					testAccCheckAWSTimestreamWriteTableExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "tags.%", "2"),
+					resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"),
+					resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"),
+					resource.TestCheckResourceAttr(resourceName, "tags_all.%", "2"),
+					resource.TestCheckResourceAttr(resourceName, "tags_all.key1", "value1updated"),
+					resource.TestCheckResourceAttr(resourceName, "tags_all.key2", "value2"),
+				),
+			},
+			{
+				Config: testAccAWSTimestreamWriteTableConfigTags1(rName, "key2", "value2"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAWSTimestreamWriteTableExists(resourceName),
+					resource.TestCheckResourceAttr(resourceName, "tags.%", "1"),
+					resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"),
+					resource.TestCheckResourceAttr(resourceName, "tags_all.%", "1"),
+					resource.TestCheckResourceAttr(resourceName, "tags_all.key2", "value2"),
+				),
+			},
+			{
+				ResourceName:      resourceName,
+				ImportState:       true,
+				ImportStateVerify: true,
+			},
+		},
+	})
+}
+
+// testAccCheckAWSTimestreamWriteTableDestroy verifies every table tracked in
+// state has actually been deleted from the API.
+func testAccCheckAWSTimestreamWriteTableDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).timestreamwriteconn
+	ctx := context.Background()
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_timestreamwrite_table" {
+			continue
+		}
+
+		tableName, dbName, err := resourceAwsTimestreamWriteTableParseId(rs.Primary.ID)
+
+		if err != nil {
+			return err
+		}
+
+		input := &timestreamwrite.DescribeTableInput{
+			DatabaseName: aws.String(dbName),
+			TableName:    aws.String(tableName),
+		}
+
+		output, err := conn.DescribeTableWithContext(ctx, input)
+
+		if tfawserr.ErrCodeEquals(err, timestreamwrite.ErrCodeResourceNotFoundException) {
+			continue
+		}
+
+		if err != nil {
+			return err
+		}
+
+		if output != nil && output.Table != nil {
+			return fmt.Errorf("Timestream Table (%s) still exists", rs.Primary.ID)
+		}
+	}
+
+	return nil
+}
+
+func 
testAccCheckAWSTimestreamWriteTableExists(n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		if rs.Primary.ID == "" {
+			return fmt.Errorf("no resource ID is set")
+		}
+
+		tableName, dbName, err := resourceAwsTimestreamWriteTableParseId(rs.Primary.ID)
+
+		if err != nil {
+			return err
+		}
+
+		conn := testAccProvider.Meta().(*AWSClient).timestreamwriteconn
+
+		input := &timestreamwrite.DescribeTableInput{
+			DatabaseName: aws.String(dbName),
+			TableName:    aws.String(tableName),
+		}
+
+		output, err := conn.DescribeTableWithContext(context.Background(), input)
+
+		if err != nil {
+			return err
+		}
+
+		if output == nil || output.Table == nil {
+			return fmt.Errorf("Timestream Table (%s) not found", rs.Primary.ID)
+		}
+
+		return nil
+	}
+}
+
+func testAccAWSTimestreamWriteTableBaseConfig(rName string) string {
+	return fmt.Sprintf(`
+resource "aws_timestreamwrite_database" "test" {
+  database_name = %q
+}
+`, rName)
+}
+
+func testAccAWSTimestreamWriteTableConfigBasic(rName string) string {
+	return composeConfig(
+		testAccAWSTimestreamWriteTableBaseConfig(rName),
+		fmt.Sprintf(`
+resource "aws_timestreamwrite_table" "test" {
+  database_name = aws_timestreamwrite_database.test.database_name
+  table_name    = %q
+}
+`, rName))
+}
+
+func testAccAWSTimestreamWriteTableConfigRetentionProperties(rName string, magneticStoreDays, memoryStoreHours int) string {
+	return composeConfig(
+		testAccAWSTimestreamWriteTableBaseConfig(rName),
+		fmt.Sprintf(`
+resource "aws_timestreamwrite_table" "test" {
+  database_name = aws_timestreamwrite_database.test.database_name
+  table_name    = %q
+
+  retention_properties {
+    magnetic_store_retention_period_in_days = %d
+    memory_store_retention_period_in_hours  = %d
+  }
+}
+`, rName, magneticStoreDays, memoryStoreHours))
+}
+
+func testAccAWSTimestreamWriteTableConfigTags1(rName, tagKey1, tagValue1 string) string {
+	return 
composeConfig( + testAccAWSTimestreamWriteTableBaseConfig(rName), + fmt.Sprintf(` +resource "aws_timestreamwrite_table" "test" { + database_name = aws_timestreamwrite_database.test.database_name + table_name = %[1]q + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccAWSTimestreamWriteTableConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return composeConfig( + testAccAWSTimestreamWriteTableBaseConfig(rName), + fmt.Sprintf(` +resource "aws_timestreamwrite_table" "test" { + database_name = aws_timestreamwrite_database.test.database_name + table_name = %[1]q + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/website/docs/r/timestreamwrite_table.html.markdown b/website/docs/r/timestreamwrite_table.html.markdown new file mode 100644 index 00000000000..66f78ee716b --- /dev/null +++ b/website/docs/r/timestreamwrite_table.html.markdown @@ -0,0 +1,72 @@ +--- +subcategory: "Timestream Write" +layout: "aws" +page_title: "AWS: aws_timestreamwrite_table" +description: |- + Provides a Timestream table resource. +--- + +# Resource: aws_timestreamwrite_table + +Provides a Timestream table resource. + +## Example Usage + +### Basic usage + +```hcl +resource "aws_timestreamwrite_table" "example" { + database_name = aws_timestreamwrite_database.example.database_name + table_name = "example" +} +``` + +### Full usage + +```hcl +resource "aws_timestreamwrite_table" "example" { + database_name = aws_timestreamwrite_database.example.database_name + table_name = "example" + + retention_properties { + magnetic_store_retention_period_in_days = 30 + memory_store_retention_period_in_hours = 8 + } + + tags = { + Name = "example-timestream-table" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `database_name` – (Required) The name of the Timestream database. 
+* `retention_properties` - (Optional) The retention duration for the memory store and magnetic store. See [Retention Properties](#retention-properties) below for more details. If not provided, `magnetic_store_retention_period_in_days` defaults to 73000 and `memory_store_retention_period_in_hours` defaults to 6.
+* `table_name` - (Required) The name of the Timestream table.
+* `tags` - (Optional) Map of tags to assign to this resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+
+### Retention Properties
+
+The `retention_properties` block supports the following arguments:
+
+* `magnetic_store_retention_period_in_days` - (Required) The duration for which data must be stored in the magnetic store. Minimum value of 1. Maximum value of 73000.
+* `memory_store_retention_period_in_hours` - (Required) The duration for which data must be stored in the memory store. Minimum value of 1. Maximum value of 8766.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - The `table_name` and `database_name` separated by a colon (`:`).
+* `arn` - The ARN that uniquely identifies this table.
+* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
+
+## Import
+
+Timestream tables can be imported using the `table_name` and `database_name` separated by a colon (`:`), e.g.
+
+```
+$ terraform import aws_timestreamwrite_table.example ExampleTable:ExampleDatabase
+```