diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go
index de7332fbcb..6595b92a9f 100644
--- a/clusters/resource_cluster.go
+++ b/clusters/resource_cluster.go
@@ -270,14 +270,31 @@ func FixInstancePoolChangeIfAny(d *schema.ResourceData, cluster any) error {
 func SetForceSendFieldsForCluster(cluster any, d *schema.ResourceData) error {
     switch c := cluster.(type) {
     case *compute.ClusterSpec:
+        // Used in jobs.
         if c.Autoscale == nil {
             c.ForceSendFields = append(c.ForceSendFields, "NumWorkers")
         }
+        // Workload type is not relevant in jobs clusters.
         return nil
     case *compute.CreateCluster:
         if c.Autoscale == nil {
             c.ForceSendFields = append(c.ForceSendFields, "NumWorkers")
         }
+        // If workload type is set by the user, the fields within Clients should always be sent.
+        // These default to true if not set.
+        if c.WorkloadType != nil {
+            c.WorkloadType.Clients.ForceSendFields = []string{"Jobs", "Notebooks"}
+        }
+        return nil
+    case *compute.EditCluster:
+        if c.Autoscale == nil {
+            c.ForceSendFields = append(c.ForceSendFields, "NumWorkers")
+        }
+        // If workload type is set by the user, the fields within Clients should always be sent.
+        // These default to true if not set.
+        if c.WorkloadType != nil {
+            c.WorkloadType.Clients.ForceSendFields = []string{"Jobs", "Notebooks"}
+        }
         return nil
     default:
         return fmt.Errorf(unsupportedExceptCreateEditClusterSpecErr, cluster, "*", "*", "*")
@@ -626,10 +643,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, c *commo
             Autoscale: cluster.Autoscale,
         })
     } else {
-        if err != nil {
-            return err
-        }
-        cluster.ForceSendFields = []string{"NumWorkers"}
+        SetForceSendFieldsForCluster(&cluster, d)
 
         err = retry.RetryContext(ctx, 15*time.Minute, func() *retry.RetryError {
             _, err = clusters.Edit(ctx, cluster)
diff --git a/internal/acceptance/cluster_test.go b/internal/acceptance/cluster_test.go
index 45749a8a40..4dad55c9f7 100644
--- a/internal/acceptance/cluster_test.go
+++ b/internal/acceptance/cluster_test.go
@@ -3,6 +3,8 @@ package acceptance
 import (
     "fmt"
     "testing"
+
+    "github.com/hashicorp/terraform-plugin-testing/helper/resource"
 )
 
 func TestAccClusterResource_CreateClusterWithLibraries(t *testing.T) {
@@ -129,3 +131,68 @@ func TestAccClusterResource_CreateAndNoWait(t *testing.T) {
     }`,
     })
 }
+
+func TestAccClusterResource_WorkloadType(t *testing.T) {
+    workspaceLevel(t, step{
+        Template: testAccClusterResourceWorkloadTypeTemplate(""),
+    }, step{
+        Template: testAccClusterResourceWorkloadTypeTemplate(`
+        workload_type {
+            clients {
+                jobs = true
+                notebooks = true
+            }
+        }`),
+        Check: resource.ComposeAggregateTestCheckFunc(
+            resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "true"),
+            resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "true"),
+        ),
+    }, step{
+        Template: testAccClusterResourceWorkloadTypeTemplate(`
+        workload_type {
+            clients {
+                jobs = false
+                notebooks = false
+            }
+        }`),
+        Check: resource.ComposeAggregateTestCheckFunc(
+            resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "false"),
+            resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "false"),
+        ),
+    }, step{
+        Template: testAccClusterResourceWorkloadTypeTemplate(`
+        workload_type {
+            clients { }
+        }`),
+        Check: resource.ComposeAggregateTestCheckFunc(
+            resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "true"),
+            resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "true"),
+        ),
+    }, step{
+        Template: testAccClusterResourceWorkloadTypeTemplate(``),
+        Check: resource.ComposeAggregateTestCheckFunc(
+            resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "true"),
+            resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "true"),
+        ),
+    })
+}
+
+func testAccClusterResourceWorkloadTypeTemplate(workloadType string) string {
+    return fmt.Sprintf(`
+data "databricks_spark_version" "latest" {}
+resource "databricks_cluster" "this" {
+    cluster_name            = "workload-{var.RANDOM}"
+    spark_version           = data.databricks_spark_version.latest.id
+    instance_pool_id        = "{env.TEST_INSTANCE_POOL_ID}"
+    autotermination_minutes = 10
+    num_workers             = 0
+    spark_conf = {
+        "spark.databricks.cluster.profile" = "singleNode"
+        "spark.master"                     = "local[*]"
+    }
+    custom_tags = {
+        "ResourceClass" = "SingleNode"
+    }
+    %s
+}`, workloadType)
+}
diff --git a/internal/acceptance/init_test.go b/internal/acceptance/init_test.go
index 9bcbb8efab..fb13d01567 100644
--- a/internal/acceptance/init_test.go
+++ b/internal/acceptance/init_test.go
@@ -166,7 +166,7 @@ func run(t *testing.T, steps []step) {
         if s.Template != "" {
             stepConfig = environmentTemplate(t, s.Template, vars)
         }
-        stepNum := i
+        stepNum := i + 1
         thisStep := s
         stepCheck := thisStep.Check
         stepPreConfig := s.PreConfig
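
Context for the ForceSendFields change above: the Databricks SDK structs tag optional fields with `omitempty`, so an explicit `jobs = false` or `notebooks = false` would otherwise be dropped from the request body and the API would fall back to its defaults. The following standalone sketch (the `clients` struct below is a placeholder for illustration, not the SDK's actual type) shows the underlying encoding/json behavior that the force-send list works around:

package main

import (
    "encoding/json"
    "fmt"
)

// Placeholder struct mirroring the shape of a Clients-style type with
// omitempty tags; not the SDK definition.
type clients struct {
    Jobs      bool `json:"jobs,omitempty"`
    Notebooks bool `json:"notebooks,omitempty"`
}

func main() {
    // Both fields are false (the zero value), so omitempty drops them and
    // this prints "{}". That is why the provider must force-send "Jobs" and
    // "Notebooks" whenever the user configures workload_type explicitly.
    b, _ := json.Marshal(clients{Jobs: false, Notebooks: false})
    fmt.Println(string(b))
}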