Skip to content

Commit

Permalink
Don't restart cluster on non-related configuration changes (#379) (#385)
Browse files Browse the repository at this point in the history
  • Loading branch information
alexott authored Nov 6, 2020
1 parent c28b209 commit e3236b5
Show file tree
Hide file tree
Showing 3 changed files with 40 additions and 18 deletions.
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,9 @@
# Version changelog

## 0.2.9

* Fixed restarting the cluster on configuration changes that aren't related to the actual cluster configuration ([issue #379](https://github.com/databrickslabs/terraform-provider-databricks/issues/379))

## 0.2.8

* Added [databricks_workspace_conf](https://github.com/databrickslabs/terraform-provider-databricks/pull/398) resource
Expand Down
35 changes: 29 additions & 6 deletions compute/resource_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import (

var clusterSchema = resourceClusterSchema()

// ResourceCluster - returns Cluster resource description
func ResourceCluster() *schema.Resource {
return &schema.Resource{
SchemaVersion: 2,
Expand Down Expand Up @@ -282,6 +283,19 @@ func legacyReadLibraryListFromData(d *schema.ResourceData) (cll ClusterLibraryLi
return
}

// hasClusterConfigChanged reports whether any schema attribute that is part
// of the actual cluster configuration has a pending change. Attributes that
// do not affect the running cluster ("library", "is_pinned") are ignored so
// that changing them does not trigger an unnecessary cluster edit/restart.
func hasClusterConfigChanged(d *schema.ResourceData) bool {
	for key := range clusterSchema {
		// TODO: create a map if we'll add more non-cluster config parameters in the future
		switch key {
		case "library", "is_pinned":
			// Not part of the cluster configuration itself.
			continue
		}
		if d.HasChange(key) {
			return true
		}
	}
	return false
}

func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error {
client := m.(*common.DatabricksClient)
clusters := NewClustersAPI(client)
Expand All @@ -291,18 +305,27 @@ func resourceClusterUpdate(d *schema.ResourceData, m interface{}) error {
if err != nil {
return err
}
modifyClusterRequest(&cluster)
clusterInfo, err := clusters.Edit(cluster)
if err != nil {
return err
var clusterInfo ClusterInfo
if hasClusterConfigChanged(d) {
log.Printf("[DEBUG] Cluster state has changed!")
modifyClusterRequest(&cluster)
clusterInfo, err = clusters.Edit(cluster)
if err != nil {
return err
}
} else {
clusterInfo, err = clusters.Get(clusterID)
if err != nil {
return err
}
}
oldPinned, newPinned := d.GetChange("is_pinned")
if oldPinned.(bool) != newPinned.(bool) {
log.Printf("[DEBUG] Update: is_pinned. Old: %v, New: %v", oldPinned, newPinned)
if newPinned.(bool) {
err = clusters.Pin(clusterInfo.ClusterID)
err = clusters.Pin(clusterID)
} else {
err = clusters.Unpin(clusterInfo.ClusterID)
err = clusters.Unpin(clusterID)
}
if err != nil {
return err
Expand Down
19 changes: 7 additions & 12 deletions compute/resource_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -826,18 +826,6 @@ func TestResourceClusterUpdateWithPinned(t *testing.T) {
LibraryStatuses: []LibraryStatus{},
},
},
{
Method: "POST",
Resource: "/api/2.0/clusters/edit",
ExpectedRequest: Cluster{
AutoterminationMinutes: 15,
ClusterID: "abc",
NumWorkers: 100,
ClusterName: "Shared Autoscaling",
SparkVersion: "7.1-scala12",
NodeTypeID: "i3.xlarge",
},
},
{
Method: "POST",
Resource: "/api/2.0/clusters/pin",
Expand All @@ -854,6 +842,13 @@ func TestResourceClusterUpdateWithPinned(t *testing.T) {
ID: "abc",
Update: true,
Resource: ResourceCluster(),
InstanceState: map[string]string{
"autotermination_minutes": "15",
"cluster_name": "Shared Autoscaling",
"spark_version": "7.1-scala12",
"node_type_id": "i3.xlarge",
"num_workers": "100",
},
State: map[string]interface{}{
"autotermination_minutes": 15,
"cluster_name": "Shared Autoscaling",
Expand Down

0 comments on commit e3236b5

Please sign in to comment.