diff --git a/.changelog/12014.txt b/.changelog/12014.txt new file mode 100644 index 00000000000..0502b9e182c --- /dev/null +++ b/.changelog/12014.txt @@ -0,0 +1,6 @@ +```release-note:bug +container: fixed missing in-place updates for some `google_container_cluster.node_config` subfields +``` +```release-note:enhancement +container: added in-place update support for `labels`, `resource_manager_tags`, and `workload_metadata_config` in `google_container_cluster.node_config` +``` \ No newline at end of file diff --git a/google/services/container/resource_container_cluster.go b/google/services/container/resource_container_cluster.go index b53e267dd43..2dae99d84a4 100644 --- a/google/services/container/resource_container_cluster.go +++ b/google/services/container/resource_container_cluster.go @@ -98,12 +98,6 @@ var ( "private_cluster_config.0.master_global_access_config", } - forceNewClusterNodeConfigFields = []string{ - "labels", - "workload_metadata_config", - "resource_manager_tags", - } - suppressDiffForAutopilot = schema.SchemaDiffSuppressFunc(func(k, oldValue, newValue string, d *schema.ResourceData) bool { if v, _ := d.Get("enable_autopilot").(bool); v { return true } @@ -120,19 +114,6 @@ var ( }) ) -// This uses the node pool nodeConfig schema but sets -// node-pool-only updatable fields to ForceNew -func clusterSchemaNodeConfig() *schema.Schema { - nodeConfigSch := schemaNodeConfig() - schemaMap := nodeConfigSch.Elem.(*schema.Resource).Schema - for _, k := range forceNewClusterNodeConfigFields { - if sch, ok := schemaMap[k]; ok { - tpgresource.ChangeFieldSchemaToForceNew(sch) - } - } - return nodeConfigSch -} - // Defines default node pool settings for the entire cluster. These settings are // overridden if specified on the specific NodePool object. func clusterSchemaNodePoolDefaults() *schema.Schema { @@ -1334,7 +1315,7 @@ func ResourceContainerCluster() *schema.Resource { }, }, - "node_config": clusterSchemaNodeConfig(), + "node_config": schemaNodeConfig(), "node_pool": { Type: schema.TypeList, @@ -3536,133 +3517,18 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } if d.HasChange("node_config") { - if d.HasChange("node_config.0.image_type") { - it := d.Get("node_config.0.image_type").(string) - req := &container.UpdateClusterRequest{ - Update: &container.ClusterUpdate{ - DesiredImageType: it, - }, - } - - updateF := func() error { - name := containerClusterFullName(project, location, clusterName) - clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) - if config.UserProjectOverride { - clusterUpdateCall.Header().Add("X-Goog-User-Project", project) - } - op, err := clusterUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, project, location, "updating GKE image type", userAgent, d.Timeout(schema.TimeoutUpdate)) - } - // Call update serially. - if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] GKE cluster %s: image type has been updated to %s", d.Id(), it) - } - - if d.HasChange("node_config.0.kubelet_config") { - - defaultPool := "default-pool" - - timeout := d.Timeout(schema.TimeoutCreate) - - nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, clusterName) - if err != nil { - return err - } - - // Acquire write-lock on nodepool. 
- npLockKey := nodePoolInfo.nodePoolLockKey(defaultPool) - - // Still should be further consolidated / DRYed up - // See b/361634104 - it := d.Get("node_config.0.kubelet_config") - - // While we're getting the value from fields in - // node_config.kubelet_config, the actual setting that needs to be - // updated is on the default nodepool. - req := &container.UpdateNodePoolRequest{ - Name: defaultPool, - KubeletConfig: expandKubeletConfig(it), - } + defaultPool := "default-pool" - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(defaultPool), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, - "updating GKE node pool kubelet_config", userAgent, timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] GKE cluster %s: kubelet_config updated", d.Id()) - } - - if d.HasChange("node_config.0.gcfs_config") { - - defaultPool := "default-pool" - - timeout := d.Timeout(schema.TimeoutCreate) - - nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, clusterName) - if err != nil { - return err - } - - // Acquire write-lock on nodepool. - npLockKey := nodePoolInfo.nodePoolLockKey(defaultPool) - - gcfsEnabled := d.Get("node_config.0.gcfs_config.0.enabled").(bool) - - // While we're getting the value from the drepcated field in - // node_config.kubelet_config, the actual setting that needs to be updated - // is on the default nodepool. 
- req := &container.UpdateNodePoolRequest{ - Name: defaultPool, - GcfsConfig: &container.GcfsConfig{ - Enabled: gcfsEnabled, - }, - } - - updateF := func() error { - clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(defaultPool), req) - if config.UserProjectOverride { - clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) - } - op, err := clusterNodePoolsUpdateCall.Do() - if err != nil { - return err - } - - // Wait until it's updated - return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, - "updating GKE node pool gcfs_config", userAgent, timeout) - } - - if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { - return err - } - - log.Printf("[INFO] GKE cluster %s: %s setting for gcfs_config updated to %t", d.Id(), defaultPool, gcfsEnabled) + nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, clusterName) + if err != nil { + return err } + // The cluster-level node_config is applied to the default node pool, so delegate the update to the shared node pool helper and surface any error. + if err := nodePoolNodeConfigUpdate(d, config, nodePoolInfo, "", defaultPool, d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } } if d.HasChange("notification_config") { diff --git a/google/services/container/resource_container_cluster_migratev1.go b/google/services/container/resource_container_cluster_migratev1.go index 3c68f9221f8..d9b65aeecb5 100644 --- a/google/services/container/resource_container_cluster_migratev1.go +++ b/google/services/container/resource_container_cluster_migratev1.go @@ -1055,7 +1055,7 @@ func resourceContainerClusterResourceV1() *schema.Resource { }, }, - "node_config": clusterSchemaNodeConfig(), + "node_config": schemaNodeConfig(), "node_pool": { Type: schema.TypeList, diff --git a/google/services/container/resource_container_cluster_test.go b/google/services/container/resource_container_cluster_test.go index df5612905b4..58c6dd45b2d 100644 --- a/google/services/container/resource_container_cluster_test.go +++ b/google/services/container/resource_container_cluster_test.go @@ -70,6 +70,10 @@ func TestAccContainerCluster_resourceManagerTags(t *testing.T) { networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + if acctest.BootstrapPSARole(t, "service-", "container-engine-robot", "roles/resourcemanager.tagHoldAdmin") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), @@ -79,7 +83,31 @@ func TestAccContainerCluster_resourceManagerTags(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerCluster_resourceManagerTags(pid, clusterName, networkName, subnetworkName, randomSuffix), + Config: testAccContainerCluster_resourceManagerTags(pid, clusterName, networkName, subnetworkName, randomSuffix, 1), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "self_link"), + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "node_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("us-central1-a/%s", clusterName), + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_resourceManagerTags(pid, clusterName, networkName, subnetworkName, randomSuffix, 2), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttrSet("google_container_cluster.primary", "self_link"), resource.TestCheckResourceAttrSet("google_container_cluster.primary", "node_config.0.resource_manager_tags.%"), @@ -1340,6 +1368,11 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccContainerCluster_withNodeConfig(clusterName, networkName, subnetworkName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, }, { ResourceName: "google_container_cluster.with_node_config", @@ -1349,6 +1382,11 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { }, { Config: testAccContainerCluster_withNodeConfigUpdate(clusterName, networkName, subnetworkName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, }, { ResourceName: "google_container_cluster.with_node_config", @@ -1360,6 +1398,133 @@ func TestAccContainerCluster_withNodeConfig(t *testing.T) { }) } +func TestAccContainerCluster_withNodeConfigLinuxNodeConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + // First test with empty `node_config.linux_node_config` (should result in "CGROUP_MODE_UNSPECIFIED") + { + Config: testAccContainerCluster_withNodeConfigLinuxNodeConfig(clusterName, networkName, subnetworkName, ""), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_linux_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + // Then add a config and make sure it updates. + { + Config: testAccContainerCluster_withNodeConfigLinuxNodeConfig(clusterName, networkName, subnetworkName, "CGROUP_MODE_V2"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_cluster.with_linux_node_config", + "node_config.0.linux_node_config.0.cgroup_mode", "CGROUP_MODE_V2", + ), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_linux_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + // Lastly, update the setting in-place. 
Use CGROUP_MODE_V1, since UNSPECIFIED is the default. + { + Config: testAccContainerCluster_withNodeConfigLinuxNodeConfig(clusterName, networkName, subnetworkName, "CGROUP_MODE_V1"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_cluster.with_linux_node_config", + "node_config.0.linux_node_config.0.cgroup_mode", "CGROUP_MODE_V1", + ), + ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + }, + { + ResourceName: "google_container_cluster.with_linux_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodeConfigFastSocket(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodeConfigFastSocket(clusterName, networkName, subnetworkName, false), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_fast_socket", + "node_config.0.fast_socket.0.enabled", "false"), + resource.TestCheckResourceAttr("google_container_cluster.with_fast_socket", + "node_config.0.gvnic.0.enabled", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_fast_socket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodeConfigFastSocket(clusterName, networkName, subnetworkName, true), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_fast_socket", + "node_config.0.fast_socket.0.enabled", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_fast_socket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + func TestAccContainerCluster_withNodeConfigGcfsConfig(t *testing.T) { t.Parallel() clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) @@ -1865,7 +2030,12 @@ func TestAccContainerCluster_withWorkloadMetadataConfig(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerCluster_withWorkloadMetadataConfig(clusterName, networkName, subnetworkName), + Config: testAccContainerCluster_withWorkloadMetadataConfig(clusterName, "GCE_METADATA", networkName, subnetworkName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_cluster.with_workload_metadata_config", "node_config.0.workload_metadata_config.0.mode", "GCE_METADATA"), @@ -1877,6 +2047,24 @@ 
ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, + { + Config: testAccContainerCluster_withWorkloadMetadataConfig(clusterName, "GKE_METADATA", networkName, subnetworkName), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + acctest.ExpectNoDelete(), + }, + }, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_workload_metadata_config", + "node_config.0.workload_metadata_config.0.mode", "GKE_METADATA"), + ), + }, + { + ResourceName: "google_container_cluster.with_workload_metadata_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, }, }) } @@ -6084,7 +6272,7 @@ resource "google_container_cluster" "with_node_config" { initial_node_count = 1 node_config { - machine_type = "n1-standard-1" // can't be e2 because of local-ssd + machine_type = "n1-standard-1" // can't be e2 because of local-ssd disk_size_gb = 15 disk_type = "pd-ssd" local_ssd_count = 1 @@ -6122,12 +6310,35 @@ resource "google_container_cluster" "with_node_config" { image_type = "COS_CONTAINERD" } deletion_protection = false - network = "%s" - subnetwork = "%s" + network = "%s" + subnetwork = "%s" } `, clusterName, networkName, subnetworkName) } +func testAccContainerCluster_withNodeConfigFastSocket(clusterName, networkName, subnetworkName string, fastSocket bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_fast_socket" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + gvnic { + enabled = true + } + fast_socket { + enabled = %t + } + } + + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, fastSocket, networkName, subnetworkName) +} + func testAccContainerCluster_withNodeConfigGcfsConfig(clusterName, networkName, subnetworkName string, enabled bool) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_config_gcfs_config" { @@ -6355,9 +6566,11 @@ resource "google_container_cluster" "with_node_config" { initial_node_count = 1 node_config { - machine_type = "n1-standard-1" // can't be e2 because of local-ssd - disk_size_gb = 15 - disk_type = "pd-ssd" + // Test updates for these fields as well + machine_type = "n1-standard-2" // can't be e2 because of local-ssd + disk_size_gb = 20 + disk_type = "pd-balanced" + local_ssd_count = 1 oauth_scopes = [ "https://www.googleapis.com/auth/monitoring", @@ -6370,35 +6583,68 @@ resource "google_container_cluster" "with_node_config" { foo = "bar" disable-legacy-endpoints = "true" } + labels = { - foo = "bar" + foo = "baz" + qux = "zzz" } - tags = ["foo", "bar"] + tags = ["baz", "qux"] preemptible = true min_cpu_platform = "Intel Broadwell" taint { key = "taint_key" value = "taint_value" - effect = "PREFER_NO_SCHEDULE" + effect = "NO_SCHEDULE" } taint { key = "taint_key2" - value = "taint_value2" + value = "taint_value2_updated" effect = "NO_EXECUTE" } - // Updatable fields image_type = "UBUNTU_CONTAINERD" } deletion_protection = false - network = "%s" - subnetwork = "%s" + network = "%s" + subnetwork = "%s" } `, clusterName, networkName, subnetworkName) } +func testAccContainerCluster_withNodeConfigLinuxNodeConfig(clusterName, networkName, subnetworkName, cgroupMode string) string { + // Empty block inside node_config if cgroupMode is empty + linuxNodeConfig := "" + + if cgroupMode != "" { + linuxNodeConfig = fmt.Sprintf(` + 
linux_node_config { + cgroup_mode = "%s" + } +`, cgroupMode) + } + + return fmt.Sprintf(` +resource "google_container_cluster" "with_linux_node_config" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + disk_size_gb = 15 + + %s + } + + network = "%s" + subnetwork = "%s" + + deletion_protection = false +} +`, clusterName, linuxNodeConfig, networkName, subnetworkName) +} + func testAccContainerCluster_withNodeConfigScopeAlias(clusterName, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_node_config_scope_alias" { @@ -6577,18 +6823,24 @@ resource "google_container_cluster" "with_node_config" { `, reservation, clusterName, networkName, subnetworkName) } -func testAccContainerCluster_withWorkloadMetadataConfig(clusterName, networkName, subnetworkName string) string { +func testAccContainerCluster_withWorkloadMetadataConfig(clusterName, workloadMetadataConfigMode, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { location = "us-central1-a" } +data "google_project" "project" {} + resource "google_container_cluster" "with_workload_metadata_config" { name = "%s" location = "us-central1-a" initial_node_count = 1 min_master_version = data.google_container_engine_versions.central1a.latest_master_version + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + node_config { oauth_scopes = [ "https://www.googleapis.com/auth/logging.write", @@ -6596,14 +6848,14 @@ resource "google_container_cluster" "with_workload_metadata_config" { ] workload_metadata_config { - mode = "GCE_METADATA" + mode = "%s" } } deletion_protection = false - network = "%s" - subnetwork = "%s" + network = "%s" + subnetwork = "%s" } -`, clusterName, networkName, subnetworkName) +`, clusterName, workloadMetadataConfigMode, networkName, subnetworkName) } func testAccContainerCluster_withBootDiskKmsKey(clusterName, kmsKeyName, networkName, subnetworkName string) string { @@ -9653,7 +9905,7 @@ resource "google_container_cluster" "primary" { `, name, networkName, subnetworkName) } -func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { +func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string, tagResourceNumber int) string { return fmt.Sprintf(` data "google_project" "project" { project_id = "%[1]s" @@ -9662,13 +9914,13 @@ data "google_project" "project" { resource "google_project_iam_member" "tagHoldAdmin" { project = "%[1]s" role = "roles/resourcemanager.tagHoldAdmin" - member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" } resource "google_project_iam_member" "tagUser1" { project = "%[1]s" role = "roles/resourcemanager.tagUser" - member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" depends_on = [google_project_iam_member.tagHoldAdmin] } @@ -9676,7 +9928,7 @@ resource "google_project_iam_member" "tagUser1" { resource "google_project_iam_member" "tagUser2" { project = "%[1]s" role = "roles/resourcemanager.tagUser" 
- member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" + member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" depends_on = [google_project_iam_member.tagHoldAdmin] } @@ -9691,22 +9943,39 @@ resource "time_sleep" "wait_120_seconds" { ] } -resource "google_tags_tag_key" "key" { - parent = "projects/%[1]s" - short_name = "foobarbaz-%[2]s" +resource "google_tags_tag_key" "key1" { + parent = data.google_project.project.id + short_name = "foobarbaz-%[2]s" description = "For foo/bar resources" - purpose = "GCE_FIREWALL" + purpose = "GCE_FIREWALL" purpose_data = { network = "%[1]s/%[4]s" } } -resource "google_tags_tag_value" "value" { - parent = "tagKeys/${google_tags_tag_key.key.name}" - short_name = "foo-%[2]s" +resource "google_tags_tag_value" "value1" { + parent = google_tags_tag_key.key1.id + short_name = "foo-%[2]s" description = "For foo resources" } +# To test updates: create two key / value sets, and swap them for the update +resource "google_tags_tag_key" "key2" { + parent = data.google_project.project.id + short_name = "qux-%[2]s" + description = "For qux resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value2" { + parent = google_tags_tag_key.key2.id + short_name = "qux-%[2]s" + description = "For qux resources" +} + data "google_container_engine_versions" "uscentral1a" { location = "us-central1-a" } @@ -9718,21 +9987,20 @@ resource "google_container_cluster" "primary" { initial_node_count = 1 node_config { - machine_type = "n1-standard-1" // can't be e2 because of local-ssd - disk_size_gb = 15 + disk_size_gb = 15 resource_manager_tags = { - "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + (google_tags_tag_key.key%[6]d.id) = google_tags_tag_value.value%[6]d.id } } deletion_protection = false - network = "%[4]s" - subnetwork = "%[5]s" + network = "%[4]s" + subnetwork = "%[5]s" depends_on = [time_sleep.wait_120_seconds] } -`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +`, projectID, randomSuffix, clusterName, networkName, subnetworkName, tagResourceNumber) } func testAccContainerCluster_withAutopilotResourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string {