Expose additional properties during cluster creation #384
@@ -55,6 +55,12 @@ func resourceContainerCluster() *schema.Resource {
				Type:     schema.TypeString,
				Computed: true,
			},
			"issue_client_certificate": {
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: true,
				Default:  true,
			},
			"password": {
				Type:     schema.TypeString,
				Required: true,
@@ -237,6 +243,30 @@ func resourceContainerCluster() *schema.Resource {
			ForceNew: true, // TODO(danawillow): Add ability to add/remove nodePools
			Elem: &schema.Resource{
				Schema: map[string]*schema.Schema{
					"autoscaling": {
						Type:     schema.TypeList,
						Optional: true,
						ForceNew: true,
						MaxItems: 1,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{
								"min_node_count": {
									Type:         schema.TypeInt,
									Optional:     true,
									ForceNew:     true,
									ValidateFunc: validation.IntAtLeast(1),
								},

								"max_node_count": {
									Type:         schema.TypeInt,
									Optional:     true,
									ForceNew:     true,
									ValidateFunc: validation.IntAtLeast(1),
								},
							},
						},
					},

					"initial_node_count": {
						Type:     schema.TypeInt,
						Optional: true,
@@ -245,6 +275,32 @@ func resourceContainerCluster() *schema.Resource {
						Deprecated: "Use node_count instead",
					},

					"management": {
						Type:     schema.TypeList,
						Optional: true,
						ForceNew: true,
						Computed: true,
						MaxItems: 1,
						Elem: &schema.Resource{
							Schema: map[string]*schema.Schema{

								"auto_repair": {
									Type:     schema.TypeBool,
									Optional: true,
									Computed: true,
									ForceNew: true,
								},

								"auto_upgrade": {
									Type:     schema.TypeBool,
									Optional: true,
									Computed: true,
									ForceNew: true,
								},
							},
						},
					},

					"node_count": {
						Type:     schema.TypeInt,
						Optional: true,
@@ -304,6 +360,12 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
		Password: masterAuth["password"].(string),
		Username: masterAuth["username"].(string),
	}

	if v, ok := masterAuth["issue_client_certificate"]; ok {
		cluster.MasterAuth.ClientCertificateConfig = &container.ClientCertificateConfig{
			IssueClientCertificate: v.(bool),
		}
	}
}

if v, ok := d.GetOk("node_version"); ok {

Review comment on the issue_client_certificate if statement: nit: since you set a default, you can remove this if statement.
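A minimal sketch of that nit applied, assuming the schema default of true guarantees the key is always present in the masterAuth map:

	// Sketch only: with Default: true in the schema, the key is always
	// populated, so the ok-check adds nothing.
	cluster.MasterAuth.ClientCertificateConfig = &container.ClientCertificateConfig{
		IssueClientCertificate: masterAuth["issue_client_certificate"].(bool),
	}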
@@ -413,6 +475,47 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er
		nodePool.Config = expandNodeConfig(v)
	}

	if v, ok := d.GetOk(prefix + ".autoscaling"); ok {
		autoscalingConfig := v.([]interface{})[0].(map[string]interface{})
		nodePool.Autoscaling = &container.NodePoolAutoscaling{}

		// Autoscaling is disabled by default. Defining the block in the
		// cluster config signals the intent to enable it.
		nodePool.Autoscaling.Enabled = true

		var minNodeCount int
		minNodeCount = 1

		if v, ok := autoscalingConfig["min_node_count"]; ok {
			minNodeCount = v.(int)
			nodePool.Autoscaling.MinNodeCount = int64(minNodeCount)
		}

		if v, ok := autoscalingConfig["max_node_count"]; ok {
			var maxNodeCount int
			maxNodeCount = v.(int)

			if maxNodeCount < minNodeCount {
				return fmt.Errorf("Cannot set autoscaling option max_node_count to less than the min_node_count value on nodepool %d", i)
			}

			nodePool.Autoscaling.MaxNodeCount = int64(maxNodeCount)
		}
	}

	if v, ok := d.GetOk(prefix + ".management"); ok {
		managementConfig := v.([]interface{})[0].(map[string]interface{})
		nodePool.Management = &container.NodeManagement{}

		if v, ok := managementConfig["auto_repair"]; ok {
			nodePool.Management.AutoRepair = v.(bool)
		}

		if v, ok := managementConfig["auto_upgrade"]; ok {
			nodePool.Management.AutoUpgrade = v.(bool)
		}
	}

	nodePools = append(nodePools, nodePool)
}
cluster.NodePools = nodePools

Review comment on nodePool.Autoscaling.Enabled = true: nit: you can put this in the block on line 480.
Review comment on var minNodeCount int: I don't think you need this var.
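A minimal sketch of both nits applied together, folding Enabled into the struct literal and replacing the var declaration with a short variable declaration (assuming minNodeCount is used nowhere else):

	nodePool.Autoscaling = &container.NodePoolAutoscaling{
		// Defining the autoscaling block signals the intent to enable it.
		Enabled: true,
	}

	minNodeCount := 1
	if v, ok := autoscalingConfig["min_node_count"]; ok {
		minNodeCount = v.(int)
		nodePool.Autoscaling.MinNodeCount = int64(minNodeCount)
	}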
@@ -483,6 +586,15 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro
			"cluster_ca_certificate": cluster.MasterAuth.ClusterCaCertificate,
		},
	}

	// We might not get this value back on existing clusters; however, we can
	// infer that if the client certificate doesn't exist, the cluster was
	// created with this set to false.
	if cluster.MasterAuth.ClientCertificateConfig != nil {
		masterAuth[0]["issue_client_certificate"] = cluster.MasterAuth.ClientCertificateConfig.IssueClientCertificate
	} else {
		masterAuth[0]["issue_client_certificate"] = cluster.MasterAuth.ClientCertificate != ""
	}

	d.Set("master_auth", masterAuth)

	d.Set("initial_node_count", cluster.InitialNodeCount)
@@ -778,6 +890,25 @@ func flattenClusterNodePools(d *schema.ResourceData, config *Config, c []*contai
		"node_count":  size / len(np.InstanceGroupUrls),
		"node_config": flattenClusterNodeConfig(np.Config),
	}

	if np.Management != nil {
		nodePool["management"] = []map[string]interface{}{
			{
				"auto_repair":  np.Management.AutoRepair,
				"auto_upgrade": np.Management.AutoUpgrade,
			},
		}
	}

	if np.Autoscaling != nil {
		nodePool["autoscaling"] = []map[string]interface{}{
			{
				"min_node_count": np.Autoscaling.MinNodeCount,
				"max_node_count": np.Autoscaling.MaxNodeCount,
			},
		}
	}

	nodePools = append(nodePools, nodePool)
}

Review comment on if np.Autoscaling != nil: you probably also want to check that np.Autoscaling.Enabled.
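A sketch of the guard that comment suggests, assuming the API can return an Autoscaling struct whose Enabled field is false:

	// Only flatten autoscaling when it is actually enabled, not merely present.
	if np.Autoscaling != nil && np.Autoscaling.Enabled {
		nodePool["autoscaling"] = []map[string]interface{}{
			{
				"min_node_count": np.Autoscaling.MinNodeCount,
				"max_node_count": np.Autoscaling.MaxNodeCount,
			},
		}
	}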
Review comment: I'm pretty sure the API requires these to be set, no? https://github.com/terraform-providers/terraform-provider-google/blob/master/google/resource_container_node_pool.go#L78 has it as Required.
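For comparison, a sketch of the schema with those fields marked Required, mirroring the linked node pool resource; this assumes the comment refers to the autoscaling min_node_count/max_node_count fields:

	"min_node_count": {
		Type:         schema.TypeInt,
		Required:     true, // assumed, mirroring resource_container_node_pool.go
		ForceNew:     true,
		ValidateFunc: validation.IntAtLeast(1),
	},
	"max_node_count": {
		Type:         schema.TypeInt,
		Required:     true, // assumed, mirroring resource_container_node_pool.go
		ForceNew:     true,
		ValidateFunc: validation.IntAtLeast(1),
	},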