From 55eb723a30da21919865262d49995493ddaf0ce3 Mon Sep 17 00:00:00 2001
From: Wesley Yarde
Date: Tue, 10 Mar 2020 05:28:31 +0100
Subject: [PATCH] Add tags to cluster node pools (#5931)

I'm excited to present my first attempt at contributing back to Terraform.
Thanks in advance for guiding me to get this PR into good shape to be merged.

Goal of the PR:
Add tags to AKS node pools.

Changes:
* Implemented tags for the node pool resource and data source (example usage below)
* Extended some of the tests
* Updated the documentation
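A minimal configuration sketch of how the new argument is intended to be used. The resource names and the surrounding arguments are illustrative placeholders, not taken from this patch:

```hcl
resource "azurerm_resource_group" "example" {
  name     = "example-resources"
  location = "West Europe"
}

resource "azurerm_kubernetes_cluster" "example" {
  name                = "example-aks"
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
  dns_prefix          = "exampleaks"

  default_node_pool {
    name       = "default"
    node_count = 1
    vm_size    = "Standard_DS2_v2"

    # Tags on the default node pool (added by this PR)
    tags = {
      Environment = "Staging"
    }
  }

  identity {
    type = "SystemAssigned"
  }
}

resource "azurerm_kubernetes_cluster_node_pool" "example" {
  name                  = "internal"
  kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
  vm_size               = "Standard_DS2_v2"
  node_count            = 1

  # Tags on an additional node pool (added by this PR)
  tags = {
    Environment = "Staging"
  }
}
```

Tags set this way apply per node pool and are independent of the top-level `tags` argument on the cluster.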
---
 .../containers/data_source_kubernetes_cluster.go      |  6 ++++++
 .../services/containers/kubernetes_nodepool.go        |  6 ++++++
 .../resource_arm_kubernetes_cluster_node_pool.go      | 12 +++++++++++-
 ...resource_arm_kubernetes_cluster_node_pool_test.go  | 12 ++++++++++++
 scripts/terrafmt-acctests.sh                          |  2 +-
 scripts/terrafmt-website.sh                           |  2 +-
 website/docs/d/kubernetes_cluster.html.markdown       |  5 ++++-
 website/docs/r/kubernetes_cluster.html.markdown       |  2 ++
 .../r/kubernetes_cluster_node_pool.html.markdown      |  6 ++++++
 9 files changed, 49 insertions(+), 4 deletions(-)

diff --git a/azurerm/internal/services/containers/data_source_kubernetes_cluster.go b/azurerm/internal/services/containers/data_source_kubernetes_cluster.go
index d094d12f80c1..6ffdfae93b02 100644
--- a/azurerm/internal/services/containers/data_source_kubernetes_cluster.go
+++ b/azurerm/internal/services/containers/data_source_kubernetes_cluster.go
@@ -149,6 +149,8 @@ func dataSourceArmKubernetesCluster() *schema.Resource {
                             Computed: true,
                         },
 
+                        "tags": tags.SchemaDataSource(),
+
                         "os_disk_size_gb": {
                             Type:     schema.TypeInt,
                             Computed: true,
@@ -763,6 +765,10 @@ func flattenKubernetesClusterDataSourceAgentPoolProfiles(input *[]containerservi
             agentPoolProfile["enable_node_public_ip"] = *profile.EnableNodePublicIP
         }
 
+        if profile.Tags != nil {
+            agentPoolProfile["tags"] = tags.Flatten(profile.Tags)
+        }
+
         agentPoolProfiles = append(agentPoolProfiles, agentPoolProfile)
     }
 
diff --git a/azurerm/internal/services/containers/kubernetes_nodepool.go b/azurerm/internal/services/containers/kubernetes_nodepool.go
index 300fc90003b4..13a22c02e33a 100644
--- a/azurerm/internal/services/containers/kubernetes_nodepool.go
+++ b/azurerm/internal/services/containers/kubernetes_nodepool.go
@@ -8,6 +8,7 @@ import (
     "github.com/hashicorp/terraform-plugin-sdk/helper/validation"
     "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
     "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags"
     "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
 )
 
@@ -105,6 +106,8 @@ func SchemaDefaultNodePool() *schema.Schema {
                     Elem: &schema.Schema{Type: schema.TypeString},
                 },
 
+                "tags": tags.Schema(),
+
                 "os_disk_size_gb": {
                     Type:     schema.TypeInt,
                     Optional: true,
@@ -146,6 +149,7 @@ func ConvertDefaultNodePoolToAgentPool(input *[]containerservice.ManagedClusterA
             ScaleSetEvictionPolicy: defaultCluster.ScaleSetEvictionPolicy,
             NodeLabels:             defaultCluster.NodeLabels,
             NodeTaints:             defaultCluster.NodeTaints,
+            Tags:                   defaultCluster.Tags,
         },
     }
 }
@@ -159,6 +163,7 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC
     nodeLabels := utils.ExpandMapStringPtrString(nodeLabelsRaw)
     nodeTaintsRaw := raw["node_taints"].([]interface{})
     nodeTaints := utils.ExpandStringSlice(nodeTaintsRaw)
+    t := raw["tags"].(map[string]interface{})
 
     profile := containerservice.ManagedClusterAgentPoolProfile{
         EnableAutoScaling: utils.Bool(enableAutoScaling),
@@ -166,6 +171,7 @@ func ExpandDefaultNodePool(d *schema.ResourceData) (*[]containerservice.ManagedC
         Name:              utils.String(raw["name"].(string)),
         NodeLabels:        nodeLabels,
         NodeTaints:        nodeTaints,
+        Tags:              tags.Expand(t),
         Type:              containerservice.AgentPoolType(raw["type"].(string)),
         VMSize:            containerservice.VMSizeTypes(raw["vm_size"].(string)),
diff --git a/azurerm/internal/services/containers/resource_arm_kubernetes_cluster_node_pool.go b/azurerm/internal/services/containers/resource_arm_kubernetes_cluster_node_pool.go
index b3c724d9e673..64e84640e816 100644
--- a/azurerm/internal/services/containers/resource_arm_kubernetes_cluster_node_pool.go
+++ b/azurerm/internal/services/containers/resource_arm_kubernetes_cluster_node_pool.go
@@ -13,6 +13,7 @@ import (
     "github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/validate"
     "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/clients"
     "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/features"
+    "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tags"
     azSchema "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/tf/schema"
     "github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/timeouts"
     "github.com/terraform-providers/terraform-provider-azurerm/azurerm/utils"
@@ -54,6 +55,8 @@ func resourceArmKubernetesClusterNodePool() *schema.Resource {
                 ValidateFunc: validation.IntBetween(1, 100),
             },
 
+            "tags": tags.Schema(),
+
             "vm_size": {
                 Type:     schema.TypeString,
                 Required: true,
@@ -200,12 +203,14 @@ func resourceArmKubernetesClusterNodePoolCreate(d *schema.ResourceData, meta int
     count := d.Get("node_count").(int)
     enableAutoScaling := d.Get("enable_auto_scaling").(bool)
     osType := d.Get("os_type").(string)
+    t := d.Get("tags").(map[string]interface{})
     vmSize := d.Get("vm_size").(string)
 
     profile := containerservice.ManagedClusterAgentPoolProfileProperties{
         OsType:             containerservice.OSType(osType),
         EnableAutoScaling:  utils.Bool(enableAutoScaling),
         EnableNodePublicIP: utils.Bool(d.Get("enable_node_public_ip").(bool)),
+        Tags:               tags.Expand(t),
         Type:               containerservice.VirtualMachineScaleSets,
         VMSize:             containerservice.VMSizeTypes(vmSize),
 
@@ -365,6 +370,11 @@ func resourceArmKubernetesClusterNodePoolUpdate(d *schema.ResourceData, meta int
         props.NodeTaints = nodeTaints
     }
 
+    if d.HasChange("tags") {
+        t := d.Get("tags").(map[string]interface{})
+        props.Tags = tags.Expand(t)
+    }
+
     // validate the auto-scale fields are both set/unset to prevent a continual diff
     maxCount := 0
     if props.MaxCount != nil {
@@ -492,7 +502,7 @@ func resourceArmKubernetesClusterNodePoolRead(d *schema.ResourceData, meta inter
         d.Set("vm_size", string(props.VMSize))
     }
 
-    return nil
+    return tags.FlattenAndSet(d, resp.Tags)
 }
 
 func resourceArmKubernetesClusterNodePoolDelete(d *schema.ResourceData, meta interface{}) error {
diff --git a/azurerm/internal/services/containers/tests/resource_arm_kubernetes_cluster_node_pool_test.go b/azurerm/internal/services/containers/tests/resource_arm_kubernetes_cluster_node_pool_test.go
index e1174949da96..b67005eb1280 100644
--- a/azurerm/internal/services/containers/tests/resource_arm_kubernetes_cluster_node_pool_test.go
+++ b/azurerm/internal/services/containers/tests/resource_arm_kubernetes_cluster_node_pool_test.go
@@ -37,6 +37,7 @@ func testAccAzureRMKubernetesClusterNodePool_autoScale(t *testing.T) {
                 Config: testAccAzureRMKubernetesClusterNodePool_autoScaleConfig(data, clientId, clientSecret),
                 Check: resource.ComposeTestCheckFunc(
                     testCheckAzureRMKubernetesNodePoolExists(data.ResourceName),
+                    resource.TestCheckResourceAttr(data.ResourceName, "tags.%", "0"),
                 ),
             },
             data.ImportStep(),
@@ -45,6 +46,7 @@ func testAccAzureRMKubernetesClusterNodePool_autoScale(t *testing.T) {
                 Config: testAccAzureRMKubernetesClusterNodePool_manualScaleConfig(data, clientId, clientSecret),
                 Check: resource.ComposeTestCheckFunc(
                     testCheckAzureRMKubernetesNodePoolExists(data.ResourceName),
+                    resource.TestCheckResourceAttr(data.ResourceName, "tags.Environment", "Staging"),
                 ),
             },
             data.ImportStep(),
@@ -53,6 +55,7 @@ func testAccAzureRMKubernetesClusterNodePool_autoScale(t *testing.T) {
                 Config: testAccAzureRMKubernetesClusterNodePool_autoScaleConfig(data, clientId, clientSecret),
                 Check: resource.ComposeTestCheckFunc(
                     testCheckAzureRMKubernetesNodePoolExists(data.ResourceName),
+                    resource.TestCheckResourceAttr(data.ResourceName, "tags.%", "0"),
                 ),
             },
             data.ImportStep(),
@@ -597,6 +600,7 @@ func testAccAzureRMKubernetesClusterNodePool_windows(t *testing.T) {
                 Config: testAccAzureRMKubernetesClusterNodePool_windowsConfig(data, clientId, clientSecret),
                 Check: resource.ComposeTestCheckFunc(
                     testCheckAzureRMKubernetesNodePoolExists(data.ResourceName),
+                    resource.TestCheckResourceAttr(data.ResourceName, "tags.Os", "Windows"),
                 ),
             },
             data.ImportStep(),
@@ -881,6 +885,10 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
   kubernetes_cluster_id = azurerm_kubernetes_cluster.test.id
   vm_size               = "Standard_DS2_v2"
   node_count            = 1
+
+  tags = {
+    Environment = "Staging"
+  }
 }
 `, template)
 }
@@ -1104,6 +1112,10 @@ resource "azurerm_kubernetes_cluster_node_pool" "test" {
   vm_size    = "Standard_DS2_v2"
   node_count = 1
   os_type    = "Windows"
+
+  tags = {
+    Os = "Windows"
+  }
 }
 `, template)
 }
diff --git a/scripts/terrafmt-acctests.sh b/scripts/terrafmt-acctests.sh
index b0f6042b3ec8..9b340137b661 100755
--- a/scripts/terrafmt-acctests.sh
+++ b/scripts/terrafmt-acctests.sh
@@ -22,7 +22,7 @@ if ${error}; then
     echo "$ find azurerm | egrep \"_test.go\" | sort | while read f; do terrafmt fmt -f \$f; done"
     echo ""
     echo "on windows:"
-    echo "$ Get-ChildItem -Path . -Recurse -Filter \"_test.go\" | foreach {terrafmt fmt -f $.name}"
+    echo "$ Get-ChildItem -Path . -Recurse -Filter \"*_test.go\" | foreach {terrafmt fmt -f $_.fullName}"
     echo ""
     exit 1
 fi
diff --git a/scripts/terrafmt-website.sh b/scripts/terrafmt-website.sh
index b45c8e7b7f1c..9738863b8d56 100755
--- a/scripts/terrafmt-website.sh
+++ b/scripts/terrafmt-website.sh
@@ -22,7 +22,7 @@ if ${error}; then
     echo "$ find . | egrep html.markdown | sort | while read f; do terrafmt fmt \$f; done"
     echo ""
     echo "on windows:"
-    echo "$ Get-ChildItem -Path . -Recurse -Filter \"*html.markdown\" | foreach {terrafmt fmt $.name}"
+    echo "$ Get-ChildItem -Path . -Recurse -Filter \"*html.markdown\" | foreach {terrafmt fmt $_.fullName}"
     echo ""
     exit 1
 fi
diff --git a/website/docs/d/kubernetes_cluster.html.markdown b/website/docs/d/kubernetes_cluster.html.markdown
index 2df3295f6980..0aa90a6c3e69 100644
--- a/website/docs/d/kubernetes_cluster.html.markdown
+++ b/website/docs/d/kubernetes_cluster.html.markdown
@@ -114,15 +114,18 @@ A `agent_pool_profile` block exports the following:
 
 * `name` - The name assigned to this pool of agents.
 
+* `node_taints` - The list of Kubernetes taints which are applied to nodes in the agent pool
+
 * `os_disk_size_gb` - The size of the Agent VM's Operating System Disk in GB.
 
 * `os_type` - The Operating System used for the Agents.
 
+* `tags` - A mapping of tags assigned to the resource.
+
 * `vm_size` - The size of each VM in the Agent Pool (e.g. `Standard_F1`).
 
 * `vnet_subnet_id` - The ID of the Subnet where the Agents in the Pool are provisioned.
 
-* `node_taints` - The list of Kubernetes taints which are applied to nodes in the agent pool
 
 ---
 
diff --git a/website/docs/r/kubernetes_cluster.html.markdown b/website/docs/r/kubernetes_cluster.html.markdown
index 1751d503e26c..3a340009d887 100644
--- a/website/docs/r/kubernetes_cluster.html.markdown
+++ b/website/docs/r/kubernetes_cluster.html.markdown
@@ -201,6 +201,8 @@ A `default_node_pool` block supports the following:
 
 * `type` - (Optional) The type of Node Pool which should be created. Possible values are `AvailabilitySet` and `VirtualMachineScaleSets`. Defaults to `VirtualMachineScaleSets`.
 
+* `tags` - (Optional) A mapping of tags to assign to the resource.
+
 * `vnet_subnet_id` - (Required) The ID of a Subnet where the Kubernetes Node Pool should exist. Changing this forces a new resource to be created.
 
 ~> **NOTE:** A Route Table must be configured on this Subnet.
diff --git a/website/docs/r/kubernetes_cluster_node_pool.html.markdown b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
index fdc7da1b5624..0534a4d5c7bf 100644
--- a/website/docs/r/kubernetes_cluster_node_pool.html.markdown
+++ b/website/docs/r/kubernetes_cluster_node_pool.html.markdown
@@ -43,6 +43,10 @@ resource "azurerm_kubernetes_cluster_node_pool" "example" {
   kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
   vm_size               = "Standard_DS2_v2"
   node_count            = 1
+
+  tags = {
+    Environment = "Production"
+  }
 }
 ```
 
@@ -78,6 +82,8 @@ The following arguments are supported:
 
 * `os_type` - (Optional) The Operating System which should be used for this Node Pool. Changing this forces a new resource to be created. Possible values are `Linux` and `Windows`. Defaults to `Linux`.
 
+* `tags` - (Optional) A mapping of tags to assign to the resource.
+
 * `vnet_subnet_id` - (Optional) The ID of the Subnet where this Node Pool should exist.
 
 -> **NOTE:** At this time the `vnet_subnet_id` must be the same for all node pools in the cluster
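As a follow-up usage note to the data source change above (a sketch, not part of the patch itself; the resource names are carried over from the example earlier in this message), the new `tags` export on `agent_pool_profile` can be read back like this:

```hcl
data "azurerm_kubernetes_cluster" "example" {
  name                = azurerm_kubernetes_cluster.example.name
  resource_group_name = azurerm_resource_group.example.name
}

output "default_node_pool_tags" {
  # agent_pool_profile now exports the tags applied to each node pool
  value = data.azurerm_kubernetes_cluster.example.agent_pool_profile[0].tags
}
```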