Expose additional properties during cluster creation #384

131 changes: 131 additions & 0 deletions google/resource_container_cluster.go
@@ -55,6 +55,12 @@ func resourceContainerCluster() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"issue_client_certificate": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
Default: true,
},
"password": {
Type: schema.TypeString,
Required: true,
@@ -237,6 +243,30 @@ func resourceContainerCluster() *schema.Resource {
ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"autoscaling": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"min_node_count": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
},

"max_node_count": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
},
},
},
},

"initial_node_count": {
Type: schema.TypeInt,
Optional: true,
@@ -245,6 +275,32 @@
Deprecated: "Use node_count instead",
},

"management": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Computed: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{

"auto_repair": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
ForceNew: true,
},

"auto_upgrade": {
Type: schema.TypeBool,
Optional: true,
Computed: true,
ForceNew: true,
},
},
},
},

"node_count": {
Type: schema.TypeInt,
Optional: true,
@@ -304,6 +360,12 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
Password: masterAuth["password"].(string),
Username: masterAuth["username"].(string),
}

if v, ok := masterAuth["issue_client_certificate"]; ok {

Contributor: nit: since you set a default, you can remove this if statement
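
A sketch of that simplification (hypothetical; it assumes the schema default guarantees the key is always present in the map, which makes the ok-check redundant):

// The schema sets Default: true, so the key always exists in masterAuth.
cluster.MasterAuth.ClientCertificateConfig = &container.ClientCertificateConfig{
	IssueClientCertificate: masterAuth["issue_client_certificate"].(bool),
}
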
cluster.MasterAuth.ClientCertificateConfig = &container.ClientCertificateConfig{
IssueClientCertificate: v.(bool),
}
}
}

if v, ok := d.GetOk("node_version"); ok {
@@ -413,6 +475,47 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
nodePool.Config = expandNodeConfig(v)
}

if v, ok := d.GetOk(prefix + ".autoscaling"); ok {
autoscalingConfig := v.([]interface{})[0].(map[string]interface{})
nodePool.Autoscaling = &container.NodePoolAutoscaling{}

// Autoscaling is disabled by default. Defining the block in the cluster config
// signals the intent to enable it, so set it to true here.
nodePool.Autoscaling.Enabled = true

Contributor: nit: you can put this in the block on line 480
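
A sketch of that suggestion (hypothetical), folding the flag into the struct literal rather than assigning it separately:

nodePool.Autoscaling = &container.NodePoolAutoscaling{
	Enabled: true, // the block's presence in the config signals the intent to enable
}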

var minNodeCount int

Contributor: I don't think you need this var
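
A sketch of the suggested simplification (hypothetical), initializing at the point of declaration so the var statement and the assignment below collapse into one line:

minNodeCount := 1 // default; overridden when min_node_count is set
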
minNodeCount = 1

if v, ok := autoscalingConfig["min_node_count"]; ok {
minNodeCount = v.(int)
nodePool.Autoscaling.MinNodeCount = int64(minNodeCount)
}

if v, ok := autoscalingConfig["max_node_count"]; ok {
var maxNodeCount int
maxNodeCount = v.(int)

if maxNodeCount < minNodeCount {
return fmt.Errorf("Cannot set autoscaling option max_node_count to less than the min_node_count value on nodepool %d", i)
}

nodePool.Autoscaling.MaxNodeCount = int64(maxNodeCount)
}
}

if v, ok := d.GetOk(prefix + ".management"); ok {
managementConfig := v.([]interface{})[0].(map[string]interface{})
nodePool.Management = &container.NodeManagement{}

if v, ok := managementConfig["auto_repair"]; ok {
nodePool.Management.AutoRepair = v.(bool)
}

if v, ok := managementConfig["auto_upgrade"]; ok {
nodePool.Management.AutoUpgrade = v.(bool)
}
}

nodePools = append(nodePools, nodePool)
}
cluster.NodePools = nodePools
@@ -483,6 +586,15 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
"cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate,
},
}

// we might not get this value back on existing clusters; however, we can infer that
// if the client certificate doesn't exist, the cluster was created with this set to false.
if cluster.MasterAuth.ClientCertificateConfig != nil {
masterAuth[0]["issue_client_certificate"] = cluster.MasterAuth.ClientCertificateConfig.IssueClientCertificate
} else {
masterAuth[0]["issue_client_certificate"] = cluster.MasterAuth.ClientCertificate != ""
}

d.Set("master_auth", masterAuth)

d.Set("initial_node_count", cluster.InitialNodeCount)
@@ -778,6 +890,25 @@ func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*contai
"node_count": size / len(np.InstanceGroupUrls),
"node_config": flattenClusterNodeConfig(np.Config),
}

if np.Management != nil {
nodePool["management"] = []map[string]interface{}{
{
"auto_repair": np.Management.AutoRepair,
"auto_upgrade": np.Management.AutoUpgrade,
},
}
}

if np.Autoscaling != nil {

Contributor: you probably also want to check that np.Autoscaling.Enabled
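
A sketch of that guard (hypothetical): the API can return an autoscaling config that is present but disabled, so the condition above could also require the Enabled flag:

if np.Autoscaling != nil && np.Autoscaling.Enabled {
	// flatten the autoscaling block only when it is actually enabled
}
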
nodePool["autoscaling"] = []map[string]interface{}{
{
"min_node_count": np.Autoscaling.MinNodeCount,
"max_node_count": np.Autoscaling.MaxNodeCount,
},
}
}

nodePools = append(nodePools, nodePool)
}

168 changes: 168 additions & 0 deletions google/resource_container_cluster_test.go
@@ -67,6 +67,23 @@ func TestAccContainerCluster_withMasterAuth(t *testing.T) {
})
}

func TestAccContainerCluster_withMasterAuthNoClientCert(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withMasterAuthNoClientCert,
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster(
"google_container_cluster.with_master_auth_no_client_cert"),
),
},
},
})
}

func TestAccContainerCluster_withAdditionalZones(t *testing.T) {
clusterName := fmt.Sprintf("cluster-test-%s", acctest.RandString(10))

@@ -374,6 +391,40 @@ func TestAccContainerCluster_withNodePoolNodeConfig(t *testing.T) {
})
}

func TestAccContainerCluster_withNodePoolManagement(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withNodePoolManagement(),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster(
"google_container_cluster.with_node_pool_management"),
),
},
},
})
}

func TestAccContainerCluster_withNodePoolAutoScaling(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckContainerClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccContainerCluster_withNodePoolAutoScaling(),
Check: resource.ComposeTestCheckFunc(
testAccCheckContainerCluster(
"google_container_cluster.with_node_pool_autoscaling"),
),
},
},
})
}

func testAccCheckContainerClusterDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config)

@@ -431,6 +482,9 @@ func testAccCheckContainerCluster(n string) resource.TestCheckFunc {
{"master_auth.0.cluster_ca_certificate", cluster.MasterAuth.ClusterCaCertificate},
{"master_auth.0.password", cluster.MasterAuth.Password},
{"master_auth.0.username", cluster.MasterAuth.Username},
// issue_client_certificate is a flag and therefore optional and not computed. Its state is
// inferred from the existence of a client certificate on the cluster itself.
{"master_auth.0.issue_client_certificate", strconv.FormatBool(cluster.MasterAuth.ClientCertificate != "")},
{"zone", cluster.Zone},
{"cluster_ipv4_cidr", cluster.ClusterIpv4Cidr},
{"description", cluster.Description},
@@ -474,6 +528,28 @@
for i, np := range cluster.NodePools {
prefix := fmt.Sprintf("node_pool.%d.", i)
clusterTests = append(clusterTests, clusterTestField{prefix + "name", np.Name})

if np.Autoscaling != nil {
if np.Autoscaling.Enabled {
clusterTests = append(clusterTests,
clusterTestField{prefix + "autoscaling.0.min_node_count", strconv.FormatInt(np.Autoscaling.MinNodeCount, 10)},
clusterTestField{prefix + "autoscaling.0.max_node_count", strconv.FormatInt(np.Autoscaling.MaxNodeCount, 10)})
}
}

// Management blocks are returned by GCP regardless of whether they were submitted. Since the
// Terraform attributes won't contain the mapped values unless they were provided in the config,
// the tests would fail; we therefore check these fields only when they appear in the config itself.
if np.Management != nil {
if _, ok := attributes[prefix+"management.0.auto_upgrade"]; ok {
clusterTests = append(clusterTests, clusterTestField{prefix + "management.0.auto_upgrade", strconv.FormatBool(np.Management.AutoUpgrade)})
}

if _, ok := attributes[prefix+"management.0.auto_repair"]; ok {
clusterTests = append(clusterTests, clusterTestField{prefix + "management.0.auto_repair", strconv.FormatBool(np.Management.AutoRepair)})
}
}

if np.Config != nil {
clusterTests = append(clusterTests,
clusterTestField{prefix + "node_config.0.machine_type", np.Config.MachineType},
@@ -635,6 +711,20 @@ resource "google_container_cluster" "with_master_auth" {
master_auth {
username = "mr.yoda"
password = "adoy.rm"
issue_client_certificate = true
}
}`, acctest.RandString(10))

var testAccContainerCluster_withMasterAuthNoClientCert = fmt.Sprintf(`
resource "google_container_cluster" "with_master_auth_no_client_cert" {
name = "cluster-test-%s"
zone = "us-central1-a"
initial_node_count = 3

master_auth {
username = "mr.yoda"
password = "adoy.rm"
issue_client_certificate = false
}
}`, acctest.RandString(10))

@@ -1029,3 +1119,81 @@ resource "google_container_cluster" "with_node_pool_node_config" {
}
`, testId, testId)
}

func testAccContainerCluster_withNodePoolManagement() string {
testId := acctest.RandString(10)
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool_management" {
name = "tf-cluster-nodepool-test-%s"
zone = "us-central1-a"
node_pool {
name = "tf-cluster-nodepool-test-%s"
node_count = 2
management {
auto_repair = true
auto_upgrade = true
}
node_config {
machine_type = "n1-standard-1"
disk_size_gb = 15
local_ssd_count = 1
oauth_scopes = [
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring"
]
service_account = "default"
metadata {
foo = "bar"
}
image_type = "COS"
labels {
foo = "bar"
}
tags = ["foo", "bar"]
}
}

}
`, testId, testId)
}

func testAccContainerCluster_withNodePoolAutoScaling() string {
testId := acctest.RandString(10)
return fmt.Sprintf(`
resource "google_container_cluster" "with_node_pool_autoscaling" {
name = "tf-cluster-nodepool-test-%s"
zone = "us-central1-a"
node_pool {
name = "tf-cluster-nodepool-test-%s"
node_count = 2
autoscaling {
min_node_count = 1
max_node_count = 5
}
node_config {
machine_type = "n1-standard-1"
disk_size_gb = 15
local_ssd_count = 1
oauth_scopes = [
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring"
]
service_account = "default"
metadata {
foo = "bar"
}
image_type = "COS"
labels {
foo = "bar"
}
tags = ["foo", "bar"]
}
}

}
`, testId, testId)
}