Wait until container cluster can be operated on. #927

Merged: 1 commit, Jul 9, 2019
google-beta/resource_container_cluster.go (56 changes: 43 additions & 13 deletions)

@@ -1091,7 +1091,14 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
 	}
 	}
 
-	return resourceContainerClusterRead(d, meta)
+	if err := resourceContainerClusterRead(d, meta); err != nil {
+		return err
+	}
+
+	if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutCreate)); err != nil {
+		return err
+	}
+	return nil
 }
 
 func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
@@ -1107,21 +1114,15 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
 		return err
 	}
 
-	cluster := &containerBeta.Cluster{}
-	err = resource.Retry(2*time.Minute, func() *resource.RetryError {
-		name := containerClusterFullName(project, location, d.Get("name").(string))
-		cluster, err = config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
-		if err != nil {
-			return resource.NonRetryableError(err)
-		}
-		if cluster.Status != "RUNNING" {
-			return resource.RetryableError(fmt.Errorf("Cluster %q has status %q with message %q", d.Get("name"), cluster.Status, cluster.StatusMessage))
-		}
-		return nil
-	})
-	if err != nil {
-		return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string)))
-	}
+	clusterName := d.Get("name").(string)
+	name := containerClusterFullName(project, location, clusterName)
+	cluster, err := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
+	if err != nil {
+		return handleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string)))
+	}
+	if cluster.Status == "ERROR" || cluster.Status == "DEGRADED" {
+		return fmt.Errorf("Cluster %q has status %q with message %q", d.Get("name"), cluster.Status, cluster.StatusMessage)
+	}
 
 	d.Set("name", cluster.Name)
 	if err := d.Set("network_policy", flattenNetworkPolicy(cluster.NetworkPolicy)); err != nil {
@@ -1246,6 +1247,10 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error {
 	clusterName := d.Get("name").(string)
 	timeoutInMinutes := int(d.Timeout(schema.TimeoutUpdate).Minutes())
 
+	if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutUpdate)); err != nil {
+		return err
+	}
+
 	d.Partial(true)
 
 	lockKey := containerClusterMutexKey(project, location, clusterName)
@@ -1875,6 +1880,10 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error {
 	clusterName := d.Get("name").(string)
 	timeoutInMinutes := int(d.Timeout(schema.TimeoutDelete).Minutes())
 
+	if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutDelete)); err != nil {
+		return err
+	}
+
 	log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
 	mutexKV.Lock(containerClusterMutexKey(project, location, clusterName))
 	defer mutexKV.Unlock(containerClusterMutexKey(project, location, clusterName))
@@ -1953,6 +1962,24 @@ func cleanFailedContainerCluster(d *schema.ResourceData, meta interface{}) error {
 	return nil
 }
 
+func waitForContainerClusterReady(config *Config, project, location, clusterName string, timeout time.Duration) error {
+	return resource.Retry(timeout, func() *resource.RetryError {
+		name := containerClusterFullName(project, location, clusterName)
+		cluster, err := config.clientContainerBeta.Projects.Locations.Clusters.Get(name).Do()
+		if err != nil {
+			return resource.NonRetryableError(err)
+		}
+		if cluster.Status == "PROVISIONING" || cluster.Status == "RECONCILING" || cluster.Status == "STOPPING" {
+			return resource.RetryableError(fmt.Errorf("Cluster %q has status %q with message %q", clusterName, cluster.Status, cluster.StatusMessage))
+		} else if cluster.Status == "RUNNING" {
+			log.Printf("Cluster %q has status 'RUNNING'.", clusterName)
+			return nil
+		} else {
+			return resource.NonRetryableError(fmt.Errorf("Cluster %q has terminal state %q with message %q.", clusterName, cluster.Status, cluster.StatusMessage))
+		}
+	})
+}
+
 // container engine's API currently mistakenly returns the instance group manager's
 // URL instead of the instance group's URL in its responses. This shim detects that
 // error, and corrects it, by fetching the instance group manager URL and retrieving
@@ -2612,6 +2639,9 @@ func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
 
 	d.Set("name", clusterName)
 	d.SetId(clusterName)
+	if err := waitForContainerClusterReady(config, project, location, clusterName, d.Timeout(schema.TimeoutCreate)); err != nil {
+		return nil, err
+	}
 
 	return []*schema.ResourceData{d}, nil
 }
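For context, the new helper follows Terraform's standard polling pattern: the callback passed to resource.Retry is re-invoked until it returns nil (success), a NonRetryableError (fail fast), or the timeout elapses. Below is a minimal, self-contained sketch of the same state machine in plain Go; pollUntilRunning and its stubbed getStatus callback are illustrative stand-ins for waitForContainerClusterReady and the Clusters.Get call, not code from this PR.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntilRunning mirrors the waitForContainerClusterReady state machine:
// transient statuses are retried, RUNNING succeeds, anything else fails fast.
// getStatus is an illustrative stand-in for the real Clusters.Get call.
func pollUntilRunning(getStatus func() (string, error), timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		status, err := getStatus()
		if err != nil {
			return err // API errors are non-retryable, like resource.NonRetryableError
		}
		switch status {
		case "PROVISIONING", "RECONCILING", "STOPPING":
			// Retryable: keep polling until the deadline.
			if time.Now().After(deadline) {
				return fmt.Errorf("timed out waiting; last status %q", status)
			}
			time.Sleep(interval)
		case "RUNNING":
			return nil
		default:
			// ERROR, DEGRADED, etc. are treated as terminal.
			return errors.New("cluster in terminal state " + status)
		}
	}
}

func main() {
	// Simulate a cluster that finishes provisioning after two polls.
	polls := 0
	getStatus := func() (string, error) {
		polls++
		if polls < 3 {
			return "PROVISIONING", nil
		}
		return "RUNNING", nil
	}
	if err := pollUntilRunning(getStatus, 10*time.Second, 100*time.Millisecond); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("cluster ready after", polls, "polls")
}

Treating unknown statuses as terminal mirrors the else branch in the PR, so a cluster stuck in ERROR or DEGRADED fails immediately instead of consuming the whole timeout.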