diff --git a/clusters/clusters_api.go b/clusters/clusters_api.go
index 9576ad1b08..96190a2ee2 100644
--- a/clusters/clusters_api.go
+++ b/clusters/clusters_api.go
@@ -14,6 +14,7 @@ import (
 
 	"github.com/databricks/terraform-provider-databricks/common"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
 // AutoScale is a struct the describes auto scaling for clusters
@@ -448,6 +449,9 @@ func (cluster Cluster) Validate() error {
 func (cluster *Cluster) ModifyRequestOnInstancePool() {
 	// Instance profile id does not exist or not set
 	if cluster.InstancePoolID == "" {
+		// Worker must use an instance pool if driver uses an instance pool,
+		// therefore empty the computed value for driver instance pool.
+		cluster.DriverInstancePoolID = ""
 		return
 	}
 	if cluster.AwsAttributes != nil {
@@ -471,6 +475,17 @@ func (cluster *Cluster) ModifyRequestOnInstancePool() {
 	cluster.DriverNodeTypeID = ""
 }
 
+// https://github.com/databricks/terraform-provider-databricks/issues/824
+func (cluster *Cluster) FixInstancePoolChangeIfAny(d *schema.ResourceData) {
+	oldInstancePool, newInstancePool := d.GetChange("instance_pool_id")
+	oldDriverPool, newDriverPool := d.GetChange("driver_instance_pool_id")
+	if oldInstancePool != newInstancePool &&
+		oldDriverPool == oldInstancePool &&
+		oldDriverPool == newDriverPool {
+		cluster.DriverInstancePoolID = cluster.InstancePoolID
+	}
+}
+
 // ClusterList shows existing clusters
 type ClusterList struct {
 	Clusters []ClusterInfo `json:"clusters,omitempty"`
diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go
index 731860e58c..c01fb8b94b 100644
--- a/clusters/resource_cluster.go
+++ b/clusters/resource_cluster.go
@@ -225,17 +225,6 @@ func hasClusterConfigChanged(d *schema.ResourceData) bool {
 	return false
 }
 
-// https://github.com/databricks/terraform-provider-databricks/issues/824
-func fixInstancePoolChangeIfAny(d *schema.ResourceData, cluster *Cluster) {
-	oldInstancePool, newInstancePool := d.GetChange("instance_pool_id")
-	oldDriverPool, newDriverPool := d.GetChange("driver_instance_pool_id")
-	if oldInstancePool != newInstancePool &&
-		oldDriverPool == oldInstancePool &&
-		oldDriverPool == newDriverPool {
-		cluster.DriverInstancePoolID = cluster.InstancePoolID
-	}
-}
-
 func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
 	clusters := NewClustersAPI(ctx, c)
 	clusterID := d.Id()
@@ -250,7 +239,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, c *commo
 		return err
 	}
 	cluster.ModifyRequestOnInstancePool()
-	fixInstancePoolChangeIfAny(d, &cluster)
+	cluster.FixInstancePoolChangeIfAny(d)
 
 	// We can only call the resize api if the cluster is in the running state
 	// and only the cluster size (ie num_workers OR autoscale) is being changed
diff --git a/jobs/resource_job.go b/jobs/resource_job.go
index 5d36755da2..731dcf8377 100644
--- a/jobs/resource_job.go
+++ b/jobs/resource_job.go
@@ -758,18 +758,21 @@ func (c controlRunStateLifecycleManager) OnUpdate(ctx context.Context) error {
 	return api.StopActiveRun(jobID, c.d.Timeout(schema.TimeoutUpdate))
 }
 
-func prepareJobSettingsForUpdate(js JobSettings) {
+func prepareJobSettingsForUpdate(d *schema.ResourceData, js JobSettings) {
 	if js.NewCluster != nil {
 		js.NewCluster.ModifyRequestOnInstancePool()
+		js.NewCluster.FixInstancePoolChangeIfAny(d)
 	}
 	for _, task := range js.Tasks {
 		if task.NewCluster != nil {
 			task.NewCluster.ModifyRequestOnInstancePool()
+			task.NewCluster.FixInstancePoolChangeIfAny(d)
 		}
 	}
 	for _, jc := range js.JobClusters {
 		if jc.NewCluster != nil {
 			jc.NewCluster.ModifyRequestOnInstancePool()
+			jc.NewCluster.FixInstancePoolChangeIfAny(d)
 		}
 	}
 }
@@ -851,7 +854,7 @@ func ResourceJob() *schema.Resource {
 				ctx = context.WithValue(ctx, common.Api, common.API_2_1)
 			}
 
-			prepareJobSettingsForUpdate(js)
+			prepareJobSettingsForUpdate(d, js)
 
 			jobsAPI := NewJobsAPI(ctx, c)
 			err := jobsAPI.Update(d.Id(), js)
diff --git a/jobs/resource_job_test.go b/jobs/resource_job_test.go
index 583fc02e87..858d2c7139 100644
--- a/jobs/resource_job_test.go
+++ b/jobs/resource_job_test.go
@@ -1573,6 +1573,112 @@ func TestResourceJobUpdate_NodeTypeToInstancePool(t *testing.T) {
 	assert.Equal(t, "Featurizer New", d.Get("name"))
 }
 
+func TestResourceJobUpdate_InstancePoolToNodeType(t *testing.T) {
+	d, err := qa.ResourceFixture{
+		Fixtures: []qa.HTTPFixture{
+			{
+				Method:   "POST",
+				Resource: "/api/2.1/jobs/reset",
+				ExpectedRequest: UpdateJobRequest{
+					JobID: 789,
+					NewSettings: &JobSettings{
+						NewCluster: &clusters.Cluster{
+							NodeTypeID:   "node-type-id-1",
+							SparkVersion: "spark-1",
+							NumWorkers:   1,
+						},
+						Tasks: []JobTaskSettings{
+							{
+								NewCluster: &clusters.Cluster{
+									NodeTypeID:   "node-type-id-2",
+									SparkVersion: "spark-2",
+									NumWorkers:   2,
+								},
+							},
+						},
+						JobClusters: []JobCluster{
+							{
+								NewCluster: &clusters.Cluster{
+									NodeTypeID:   "node-type-id-3",
+									SparkVersion: "spark-3",
+									NumWorkers:   3,
+								},
+							},
+						},
+						Name:                   "Featurizer New",
+						MaxRetries:             3,
+						MinRetryIntervalMillis: 5000,
+						RetryOnTimeout:         true,
+						MaxConcurrentRuns:      1,
+					},
+				},
+			},
+			{
+				Method:   "GET",
+				Resource: "/api/2.1/jobs/get?job_id=789",
+				Response: Job{
+					JobID: 789,
+					Settings: &JobSettings{
+						NewCluster: &clusters.Cluster{
+							NodeTypeID:           "node-type-id",
+							DriverNodeTypeID:     "driver-node-type-id",
+							InstancePoolID:       "instance-pool-id-worker",
+							DriverInstancePoolID: "instance-pool-id-driver",
+						},
+						Name:                   "Featurizer New",
+						MaxRetries:             3,
+						MinRetryIntervalMillis: 5000,
+						RetryOnTimeout:         true,
+						MaxConcurrentRuns:      1,
+					},
+				},
+			},
+		},
+		ID:       "789",
+		Update:   true,
+		Resource: ResourceJob(),
+		InstanceState: map[string]string{
+			"new_cluster.0.instance_pool_id":           "instance-pool-id-worker",
+			"new_cluster.0.driver_instance_pool_id":    "instance-pool-id-driver",
+			"new_cluster.0.node_type_id":               "node-type-id-worker",
+			"task.0.new_cluster.0.node_type_id":        "node-type-id-worker-task",
+			"task.0.instance_pool_id":                  "instance-pool-id-worker",
+			"task.0.driver_instance_pool_id":           "instance-pool-id-driver",
+			"job_cluster.0.new_cluster.0.node_type_id": "node-type-id-worker-job",
+			"job_cluster.0.instance_pool_id":           "instance-pool-id-worker",
+			"job_cluster.0.driver_instance_pool_id":    "instance-pool-id-driver",
+		},
+		HCL: `
+		new_cluster = {
+			node_type_id = "node-type-id-1"
+			spark_version = "spark-1"
+			num_workers = 1
+		}
+		task = {
+			new_cluster = {
+				node_type_id = "node-type-id-2"
+				spark_version = "spark-2"
+				num_workers = 2
+			}
+		}
+		job_cluster = {
+			new_cluster = {
+				node_type_id = "node-type-id-3"
+				spark_version = "spark-3"
+				num_workers = 3
+			}
+		}
+		max_concurrent_runs = 1
+		max_retries = 3
+		min_retry_interval_millis = 5000
+		name = "Featurizer New"
+		retry_on_timeout = true`,
+	}.Apply(t)
+	assert.NoError(t, err)
+	assert.Equal(t, "789", d.Id(), "Id should be the same as in reading")
+	assert.Equal(t, "Featurizer New", d.Get("name"))
+}
+
 func TestResourceJobUpdate_Tasks(t *testing.T) {
 	qa.ResourceFixture{
 		Fixtures: []qa.HTTPFixture{