diff --git a/.changelog/34220.txt b/.changelog/34220.txt new file mode 100644 index 00000000000..a70fa68bb60 --- /dev/null +++ b/.changelog/34220.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_finspace_kx_cluster: In-place updates are now supported for the `code`, `command_line_arguments`, `database`, and `initialization_script` arguments. The update timeout has been increased to 30 minutes. +``` diff --git a/internal/service/finspace/kx_cluster.go b/internal/service/finspace/kx_cluster.go index 36760452873..cc303fa6b61 100644 --- a/internal/service/finspace/kx_cluster.go +++ b/internal/service/finspace/kx_cluster.go @@ -43,7 +43,7 @@ func ResourceKxCluster() *schema.Resource { Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(45 * time.Minute), - Update: schema.DefaultTimeout(2 * time.Minute), // Tags only + Update: schema.DefaultTimeout(30 * time.Minute), Delete: schema.DefaultTimeout(60 * time.Minute), }, @@ -155,26 +155,22 @@ func ResourceKxCluster() *schema.Resource { "code": { Type: schema.TypeList, Optional: true, - ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "s3_bucket": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.StringLenBetween(3, 255), }, "s3_key": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: validation.StringLenBetween(3, 1024), }, "s3_object_version": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: validation.StringLenBetween(3, 63), }, }, @@ -184,7 +180,6 @@ func ResourceKxCluster() *schema.Resource { Type: schema.TypeMap, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, - ForceNew: true, ValidateDiagFunc: validation.AllDiag( validation.MapKeyLenBetween(1, 50), validation.MapValueLenBetween(1, 50), @@ -197,13 +192,11 @@ func ResourceKxCluster() *schema.Resource { "database": { Type: schema.TypeList, Optional: true, - ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ 
"cache_configurations": { Type: schema.TypeList, Optional: true, - ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cache_type": { @@ -217,7 +210,6 @@ func ResourceKxCluster() *schema.Resource { Type: schema.TypeString, }, Optional: true, - ForceNew: true, }, }, }, @@ -225,7 +217,6 @@ func ResourceKxCluster() *schema.Resource { "changeset_id": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 26), }, "database_name": { @@ -258,7 +249,6 @@ func ResourceKxCluster() *schema.Resource { "initialization_script": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 255), }, "last_modified_timestamp": { @@ -509,10 +499,8 @@ func resourceKxClusterRead(ctx context.Context, d *schema.ResourceData, meta int return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) } - if d.IsNewResource() { - if err := d.Set("database", flattenDatabases(out.Databases)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) - } + if err := d.Set("database", flattenDatabases(out.Databases)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) } if err := d.Set("command_line_arguments", flattenCommandLineArguments(out.CommandLineArguments)); err != nil { @@ -537,7 +525,66 @@ func resourceKxClusterRead(ctx context.Context, d *schema.ResourceData, meta int func resourceKxClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - // Tags only. 
+ conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + updateDb := false + updateCode := false + + CodeConfigIn := &finspace.UpdateKxClusterCodeConfigurationInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + ClusterName: aws.String(d.Get("name").(string)), + } + + DatabaseConfigIn := &finspace.UpdateKxClusterDatabasesInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + ClusterName: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("database"); ok && len(v.([]interface{})) > 0 && d.HasChanges("database") { + DatabaseConfigIn.Databases = expandDatabases(d.Get("database").([]interface{})) + updateDb = true + } + + if v, ok := d.GetOk("code"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil && d.HasChanges("code") { + CodeConfigIn.Code = expandCode(v.([]interface{})) + updateCode = true + } + + if v, ok := d.GetOk("initialization_script"); ok && d.HasChanges("initialization_script") { + CodeConfigIn.Code = expandCode(d.Get("code").([]interface{})) + CodeConfigIn.InitializationScript = aws.String(v.(string)) + updateCode = true + } + + if v, ok := d.GetOk("command_line_arguments"); ok && len(v.(map[string]interface{})) > 0 && d.HasChanges("command_line_arguments") { + CodeConfigIn.Code = expandCode(d.Get("code").([]interface{})) + CodeConfigIn.CommandLineArguments = expandCommandLineArguments(v.(map[string]interface{})) + updateCode = true + } + + if updateDb { + log.Printf("[DEBUG] Updating FinSpace KxClusterDatabases (%s): %#v", d.Id(), DatabaseConfigIn) + if _, err := conn.UpdateKxClusterDatabases(ctx, DatabaseConfigIn); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxCluster, d.Id(), err)...) + } + if _, err := waitKxClusterUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxCluster, d.Id(), err)...) 
+ } + } + + if updateCode { + log.Printf("[DEBUG] Updating FinSpace KxClusterCodeConfiguration (%s): %#v", d.Id(), CodeConfigIn) + if _, err := conn.UpdateKxClusterCodeConfiguration(ctx, CodeConfigIn); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxCluster, d.Id(), err)...) + } + if _, err := waitKxClusterUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxCluster, d.Id(), err)...) + } + } + + if !updateCode && !updateDb { + return diags + } return append(diags, resourceKxClusterRead(ctx, d, meta)...) } @@ -585,6 +632,24 @@ func waitKxClusterCreated(ctx context.Context, conn *finspace.Client, id string, return nil, err } +func waitKxClusterUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxClusterOutput, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxClusterStatusPending, types.KxClusterStatusUpdating), + Target: enum.Slice(types.KxClusterStatusRunning), + Refresh: statusKxCluster(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxClusterOutput); ok { + return out, err + } + + return nil, err +} + func waitKxClusterDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxClusterOutput, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.KxClusterStatusDeleting), diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 6860f338724..6fc14e33133 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -138,6 +138,7 @@ func TestAccFinSpaceKxCluster_database(t *testing.T) { } ctx := 
acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_cluster.test" @@ -280,6 +281,7 @@ func TestAccFinSpaceKxCluster_code(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_cluster.test" codePath := "test-fixtures/code.zip" + updatedCodePath := "test-fixtures/updated_code.zip" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -292,9 +294,25 @@ func TestAccFinSpaceKxCluster_code(t *testing.T) { CheckDestroy: testAccCheckKxClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccKxClusterConfig_code(rName, codePath), + Config: testAccKxClusterConfig_code(rName, codePath, updatedCodePath, codePath), Check: resource.ComposeTestCheckFunc( testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "code.*", map[string]string{ + "s3_bucket": rName, + "s3_key": codePath, + }), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + { + Config: testAccKxClusterConfig_code(rName, codePath, updatedCodePath, updatedCodePath), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "code.*", map[string]string{ + "s3_bucket": rName, + "s3_key": updatedCodePath, + }), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), ), }, }, @@ -434,9 +452,9 @@ func TestAccFinSpaceKxCluster_initializationScript(t *testing.T) { var kxcluster finspace.GetKxClusterOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_cluster.test" - // Need to set these to the bucket/key you want to use codePath := "test-fixtures/code.zip" initScriptPath := "code/helloworld.q" + updatedInitScriptPath := 
"code/helloworld_updated.q" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -452,6 +470,14 @@ func TestAccFinSpaceKxCluster_initializationScript(t *testing.T) { Config: testAccKxClusterConfig_initScript(rName, codePath, initScriptPath), Check: resource.ComposeTestCheckFunc( testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + { + Config: testAccKxClusterConfig_initScript(rName, codePath, updatedInitScriptPath), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), ), }, }, @@ -466,6 +492,7 @@ func TestAccFinSpaceKxCluster_commandLineArgs(t *testing.T) { ctx := acctest.Context(t) var kxcluster finspace.GetKxClusterOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + codePath := "test-fixtures/code.zip" resourceName := "aws_finspace_kx_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -479,11 +506,21 @@ func TestAccFinSpaceKxCluster_commandLineArgs(t *testing.T) { CheckDestroy: testAccCheckKxClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccKxClusterConfig_commandLineArgs1(rName, "arg1", "value1"), + Config: testAccKxClusterConfig_commandLineArgs(rName, "arg1", "value1", codePath), Check: resource.ComposeTestCheckFunc( testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), resource.TestCheckResourceAttr(resourceName, "command_line_arguments.%", "1"), resource.TestCheckResourceAttr(resourceName, "command_line_arguments.arg1", "value1"), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + { + Config: testAccKxClusterConfig_commandLineArgs(rName, "arg1", "value2", codePath), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + 
resource.TestCheckResourceAttr(resourceName, "command_line_arguments.%", "1"), + resource.TestCheckResourceAttr(resourceName, "command_line_arguments.arg1", "value2"), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), ), }, }, @@ -762,10 +799,82 @@ resource "aws_finspace_kx_cluster" "test" { `, rName, description)) } -func testAccKxClusterConfig_commandLineArgs1(rName, arg1, val1 string) string { +func testAccKxClusterConfig_commandLineArgs(rName, arg1, val1, codePath string) string { return acctest.ConfigCompose( testAccKxClusterConfigBase(rName), fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +data "aws_iam_policy_document" "bucket_policy" { + statement { + actions = [ + "s3:GetObject", + "s3:GetObjectTagging" + ] + + resources = [ + "arn:${data.aws_partition.current.partition}:s3:::${aws_s3_bucket.test.id}/*", + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "s3:ListBucket" + ] + + resources = [ + "arn:${data.aws_partition.current.partition}:s3:::${aws_s3_bucket.test.id}", + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } +} + +resource "aws_s3_bucket_policy" "test" { + bucket = aws_s3_bucket.test.id + policy = data.aws_iam_policy_document.bucket_policy.json +} + +resource "aws_s3_object" "object" { + bucket = aws_s3_bucket.test.id + key = %[4]q + source = %[4]q +} + 
resource "aws_finspace_kx_cluster" "test" { name = %[1]q environment_id = aws_finspace_kx_environment.test.id @@ -785,11 +894,16 @@ resource "aws_finspace_kx_cluster" "test" { ip_address_type = "IP_V4" } + code { + s3_bucket = aws_s3_bucket.test.id + s3_key = %[4]q + } + command_line_arguments = { %[2]q = %[3]q } } -`, rName, arg1, val1)) +`, rName, arg1, val1, codePath)) } func testAccKxClusterConfig_tags1(rName, tagKey1, tagValue1 string) string { @@ -872,6 +986,10 @@ resource "aws_finspace_kx_cluster" "test" { database { database_name = aws_finspace_kx_database.test.name + cache_configurations { + cache_type = "CACHE_1000" + db_paths = ["/"] + } } capacity_configuration { @@ -879,6 +997,11 @@ resource "aws_finspace_kx_cluster" "test" { node_type = "kx.s.xlarge" } + cache_storage_configurations { + size = 1200 + type = "CACHE_1000" + } + vpc_configuration { vpc_id = aws_vpc.test.id security_group_ids = [aws_security_group.test.id] @@ -1024,7 +1147,7 @@ resource "aws_finspace_kx_cluster" "test" { `, rName)) } -func testAccKxClusterConfig_code(rName, path string) string { +func testAccKxClusterConfig_code(rName, path string, path2 string, clusterPath string) string { return acctest.ConfigCompose( testAccKxClusterConfigBase(rName), fmt.Sprintf(` @@ -1100,6 +1223,12 @@ resource "aws_s3_object" "object" { source = %[2]q } +resource "aws_s3_object" "updated_object" { + bucket = aws_s3_bucket.test.id + key = %[3]q + source = %[3]q +} + resource "aws_finspace_kx_cluster" "test" { name = %[1]q environment_id = aws_finspace_kx_environment.test.id @@ -1121,10 +1250,10 @@ resource "aws_finspace_kx_cluster" "test" { code { s3_bucket = aws_s3_bucket.test.id - s3_key = aws_s3_object.object.key + s3_key = %[4]q } } -`, rName, path)) +`, rName, path, path2, clusterPath)) } func testAccKxClusterConfig_multiAZ(rName string) string { diff --git a/internal/service/finspace/test-fixtures/code.zip b/internal/service/finspace/test-fixtures/code.zip index 34a083bc499..65f9240a0e2 100644 
Binary files a/internal/service/finspace/test-fixtures/code.zip and b/internal/service/finspace/test-fixtures/code.zip differ diff --git a/internal/service/finspace/test-fixtures/updated_code.zip b/internal/service/finspace/test-fixtures/updated_code.zip new file mode 100644 index 00000000000..65f9240a0e2 Binary files /dev/null and b/internal/service/finspace/test-fixtures/updated_code.zip differ diff --git a/website/docs/r/finspace_kx_cluster.html.markdown b/website/docs/r/finspace_kx_cluster.html.markdown index de7c95f5dfa..f796865240a 100644 --- a/website/docs/r/finspace_kx_cluster.html.markdown +++ b/website/docs/r/finspace_kx_cluster.html.markdown @@ -178,7 +178,7 @@ This resource exports the following attributes in addition to the arguments abov [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): * `create` - (Default `45m`) -* `update` - (Default `2m`) +* `update` - (Default `30m`) * `delete` - (Default `60m`) ## Import