Support: wait_till for container_cluster resource #2232

Merged 1 commit on Feb 9, 2021
259 changes: 122 additions & 137 deletions ibm/resource_ibm_container_cluster.go
@@ -10,6 +10,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/helper/customdiff"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/helper/validation"

v1 "github.com/IBM-Cloud/bluemix-go/api/container/containerv1"
"github.com/IBM-Cloud/bluemix-go/bmxerror"
@@ -268,6 +269,14 @@ func resourceIBMContainerCluster() *schema.Resource {
Default: true,
Description: "Wait for worker node to update during kube version update.",
},
"wait_till": {
Type: schema.TypeString,
Optional: true,
Default: ingressReady,
DiffSuppressFunc: applyOnce,
ValidateFunc: validation.StringInSlice([]string{masterNodeReady, oneWorkerNodeReady, ingressReady}, true),
Description: "wait_till can be configured for Master Ready, One worker Ready or Ingress Ready",
},
"service_subnet": {
Type: schema.TypeString,
Optional: true,
@@ -628,6 +637,10 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{})
if v, ok := d.GetOkExists("public_service_endpoint"); ok {
params.PublicEndpointEnabled = v.(bool)
}
var timeoutStage string
if v, ok := d.GetOk("wait_till"); ok {
timeoutStage = v.(string)
}

targetEnv, err := getClusterTargetHeader(d, meta)
if err != nil {
@@ -639,11 +652,19 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{})
return err
}
d.SetId(cls.ID)
//wait for cluster availability
_, err = WaitForClusterCreation(d, meta, targetEnv)
if err != nil {
return fmt.Errorf(
"Error waiting for cluster (%s) to become ready: %s", d.Id(), err)
switch strings.ToLower(timeoutStage) {

case strings.ToLower(masterNodeReady):
_, err = waitForClusterMasterAvailable(d, meta)
if err != nil {
return err
}

case strings.ToLower(oneWorkerNodeReady):
_, err = waitForClusterOneWorkerAvailable(d, meta)
if err != nil {
return err
}
}
d.Set("force_delete_storage", d.Get("force_delete_storage").(bool))

@@ -962,7 +983,7 @@ func resourceIBMContainerClusterUpdate(d *schema.ResourceData, meta interface{})
}
}

if d.HasChange("labels") || d.IsNewResource() {
if d.HasChange("labels") {
workerPoolsAPI := csClient.WorkerPools()
workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv)
if err != nil {
@@ -1071,6 +1092,7 @@ func resourceIBMContainerClusterUpdate(d *schema.ResourceData, meta interface{})
if err != nil {
return fmt.Errorf("Error updating worker %s: %s", oldPack["id"].(string), err)
}

_, err = WaitForWorkerAvailable(d, meta, targetEnv)
if err != nil {
return fmt.Errorf(
@@ -1082,136 +1104,6 @@ func resourceIBMContainerClusterUpdate(d *schema.ResourceData, meta interface{})

}

/*if d.HasChange("workers") {
oldWorkers, newWorkers := d.GetChange("workers")
oldWorker := oldWorkers.([]interface{})
newWorker := newWorkers.([]interface{})
for _, nW := range newWorker {
newPack := nW.(map[string]interface{})
exists := false
for _, oW := range oldWorker {
oldPack := oW.(map[string]interface{})
if strings.Compare(newPack["name"].(string), oldPack["name"].(string)) == 0 {
exists = true
if strings.Compare(newPack["action"].(string), oldPack["action"].(string)) != 0 {
params := v1.WorkerUpdateParam{
Action: newPack["action"].(string),
}
err := wrkAPI.Update(clusterID, oldPack["id"].(string), params, targetEnv)
if err != nil {
return fmt.Errorf("Error updating worker %s: %s", oldPack["id"].(string), err)
}
_, err = WaitForWorkerAvailable(d, meta, targetEnv)
if err != nil {
return fmt.Errorf(
"Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err)
}
workerObj, err := wrkAPI.Get(oldPack["id"].(string), targetEnv)
if err != nil {
return fmt.Errorf("Error getting worker %s: %s", oldPack["id"].(string), err)
}
var worker = map[string]string{
"name": newPack["name"].(string),
"id": newPack["id"].(string),
"action": newPack["action"].(string),
"version": strings.Split(workerObj.KubeVersion, "_")[0],
}
workersInfo = append(workersInfo, worker)
} else if strings.Compare(newPack["version"].(string), oldPack["version"].(string)) != 0 {
cluster, err := clusterAPI.Find(clusterID, targetEnv)
if err != nil {
return fmt.Errorf("Error retrieving cluster %s: %s", clusterID, err)
}
if newPack["version"].(string) != strings.Split(cluster.MasterKubeVersion, "_")[0] {
return fmt.Errorf("Worker version %s should match the master kube version %s", newPack["version"].(string), strings.Split(cluster.MasterKubeVersion, "_")[0])
}
params := v1.WorkerUpdateParam{
Action: "update",
}
err = wrkAPI.Update(clusterID, oldPack["id"].(string), params, targetEnv)
if err != nil {
return fmt.Errorf("Error updating worker %s: %s", oldPack["id"].(string), err)
}
_, err = WaitForWorkerAvailable(d, meta, targetEnv)
if err != nil {
return fmt.Errorf(
"Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err)
}
workerObj, err := wrkAPI.Get(oldPack["id"].(string), targetEnv)
if err != nil {
return fmt.Errorf("Error getting worker %s: %s", oldPack["id"].(string), err)
}
var worker = map[string]string{
"name": newPack["name"].(string),
"id": newPack["id"].(string),
"action": newPack["action"].(string),
"version": strings.Split(workerObj.KubeVersion, "_")[0],
}
workersInfo = append(workersInfo, worker)

} else {
workerObj, err := wrkAPI.Get(oldPack["id"].(string), targetEnv)
if err != nil {
return fmt.Errorf("Error getting worker %s: %s", oldPack["id"].(string), err)
}
var worker = map[string]string{
"name": oldPack["name"].(string),
"id": oldPack["id"].(string),
"action": oldPack["action"].(string),
"version": strings.Split(workerObj.KubeVersion, "_")[0],
}
workersInfo = append(workersInfo, worker)
}
}
}
if !exists {
params := v1.WorkerParam{
Action: "add",
Count: 1,
}
err := wrkAPI.Add(clusterID, params, targetEnv)
if err != nil {
return fmt.Errorf("Error adding worker to cluster")
}
id, err := getID(d, meta, clusterID, oldWorker, workersInfo)
if err != nil {
return fmt.Errorf("Error getting id of worker")
}
_, err = WaitForWorkerAvailable(d, meta, targetEnv)
if err != nil {
return fmt.Errorf(
"Error waiting for workers of cluster (%s) to become ready: %s", d.Id(), err)
}
workerObj, err := wrkAPI.Get(id, targetEnv)
if err != nil {
return fmt.Errorf("Error getting worker %s: %s", id, err)
}
var worker = map[string]string{
"name": newPack["name"].(string),
"id": id,
"action": newPack["action"].(string),
"version": strings.Split(workerObj.KubeVersion, "_")[0],
}
workersInfo = append(workersInfo, worker)
}
}
for _, oW := range oldWorker {
oldPack := oW.(map[string]interface{})
exists := false
for _, nW := range newWorker {
newPack := nW.(map[string]interface{})
exists = exists || (strings.Compare(oldPack["name"].(string), newPack["name"].(string)) == 0)
}
if !exists {
wrkAPI.Delete(clusterID, oldPack["id"].(string), targetEnv)
}

}
//wait for new workers to available
//Done - Can we not put WaitForWorkerAvailable after all client.DeleteWorker
d.Set("workers", workersInfo)
}*/

//TODO put webhooks can't be deleted in the error message if such a case is observed in the changes
if d.HasChange("webhook") {
oldHooks, newHooks := d.GetChange("webhook")
@@ -1276,7 +1168,7 @@ func resourceIBMContainerClusterUpdate(d *schema.ResourceData, meta interface{})
}
}
}
if publicSubnetAdded {
if publicSubnetAdded && d.Get("wait_till").(string) == ingressReady {
_, err = WaitForSubnetAvailable(d, meta, targetEnv)
if err != nil {
return fmt.Errorf(
@@ -1430,6 +1322,99 @@ func clusterStateRefreshFunc(client v1.Clusters, instanceID string, target v1.Cl
}
}

// waitForClusterMasterAvailable waits for the cluster master to become ready
func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
targetEnv, err := getClusterTargetHeader(d, meta)
if err != nil {
return nil, err
}
csClient, err := meta.(ClientSession).ContainerAPI()
if err != nil {
return nil, err
}
clusterID := d.Id()

stateConf := &resource.StateChangeConf{
Pending: []string{deployRequested, deployInProgress},
Target: []string{ready},
Refresh: func() (interface{}, string, error) {
clusterFields, err := csClient.Clusters().FindWithOutShowResourcesCompatible(clusterID, targetEnv)
if err != nil {
return nil, "", fmt.Errorf("Error retrieving cluster: %s", err)
}

if clusterFields.MasterStatus == ready {
return clusterFields, ready, nil
}
return clusterFields, deployInProgress, nil
},
Timeout: d.Timeout(schema.TimeoutCreate),
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}

return stateConf.WaitForState()
}

// waitForClusterOneWorkerAvailable waits for at least one worker node in the cluster to become ready
func waitForClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
targetEnv, err := getClusterTargetHeader(d, meta)
if err != nil {
return nil, err
}
csClient, err := meta.(ClientSession).ContainerAPI()
if err != nil {
return nil, err
}
clusterID := d.Id()

stateConf := &resource.StateChangeConf{
Pending: []string{"retry", "deploying", "provisioning"},
Target: []string{normal},
Refresh: func() (interface{}, string, error) {

workerPoolsAPI := csClient.WorkerPools()
workerPools, err := workerPoolsAPI.ListWorkerPools(clusterID, targetEnv)
if err != nil {
return nil, "", err
}
var poolName string
var poolContains bool

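// Prefer the default worker pool; if the cluster instead has separate compute and gateway pools, watch the compute pool.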
if len(workerPools) > 0 && workerPoolContains(workerPools, defaultWorkerPool) {
poolName = defaultWorkerPool
poolContains = true
} else if len(workerPools) > 0 && workerPoolContains(workerPools, computeWorkerPool) && workerPoolContains(workerPools, gatewayWorkerpool) {
poolName = computeWorkerPool
poolContains = true
}
if poolContains {
wrkAPI := csClient.Workers()
workersByPool, err := wrkAPI.ListByWorkerPool(clusterID, poolName, false, targetEnv)
if err != nil {
return nil, "", fmt.Errorf("Error retrieving workers of default worker pool for cluster: %s", err)
}
if len(workersByPool) == 0 {
return workersByPool, "provisioning", nil
}
for _, worker := range workersByPool {

if worker.State == normal {
return workersByPool, normal, nil
}
}
return workersByPool, "deploying", nil
}
return nil, normal, nil
},
Timeout: d.Timeout(schema.TimeoutCreate),
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}

return stateConf.WaitForState()
}

// WaitForWorkerAvailable Waits for worker creation
func WaitForWorkerAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
csClient, err := meta.(ClientSession).ContainerAPI()
7 changes: 7 additions & 0 deletions website/docs/r/container_cluster.html.markdown
@@ -170,6 +170,13 @@ The following arguments are supported:
(b) If you want to connect your worker nodes to a private VLAN only, do not specify this option.
* `pod_subnet` - (Optional, Forces new resource,String) Specify a custom subnet CIDR to provide private IP addresses for pods. The subnet must be at least '/23' or larger. For more info, refer [here](https://cloud.ibm.com/docs/containers?topic=containers-cli-plugin-kubernetes-service-cli#pod-subnet).
* `service_subnet` - (Optional, Forces new resource,String) Specify a custom subnet CIDR to provide private IP addresses for services. The subnet must be at least '/24' or larger. For more info, refer [here](https://cloud.ibm.com/docs/containers?topic=containers-cli-plugin-kubernetes-service-cli#service-subnet).
* `wait_till` - (Optional, String) Cluster creation happens in multiple stages. To avoid long wait times during resource execution, this field lets the resource wait only until the specified stage completes before finishing (see the example configuration below). The supported stages are:
  - *MasterNodeReady*: The resource waits until the master node is ready.
  - *OneWorkerNodeReady*: The resource waits until at least one worker node is in the ready state.
  - *IngressReady*: The resource waits until the Ingress hostname and secret are available.

  Default value: `IngressReady`
* `private_vlan_id` - (Optional, Forces new resource, string) The private VLAN of the worker node. You can retrieve the value by running the ibmcloud ks vlans <data-center> command in the IBM Cloud CLI.
* Free clusters: You must not specify any private VLAN. Your free cluster is automatically connected to a private VLAN that is owned by IBM.
* Standard clusters:<br/>
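For reference, a minimal configuration sketch that uses the new `wait_till` argument (the cluster name, datacenter, machine type, VLAN IDs, and pool size below are placeholder values, not taken from this change):

```hcl
resource "ibm_container_cluster" "cluster" {
  name              = "mycluster"
  datacenter        = "dal10"
  machine_type      = "b3c.4x16"
  hardware          = "shared"
  public_vlan_id    = "vlan-public-id"
  private_vlan_id   = "vlan-private-id"
  default_pool_size = 2

  # Finish the apply once the master is ready instead of waiting
  # for Ingress to come up (the default stage is IngressReady).
  wait_till = "MasterNodeReady"
}
```

Because the field is registered with the `applyOnce` diff suppression, changing `wait_till` on an existing cluster is not expected to trigger an update by itself.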