From a4b02256e80cc6b8a33160d63aca9363d20ed4ea Mon Sep 17 00:00:00 2001
From: Miles Yucht
Date: Mon, 2 Sep 2024 11:42:08 -0400
Subject: [PATCH] [Fix] Correctly send workload_type fields in `databricks_cluster` to allow users to disable usage in certain contexts (#3972)

## Changes
Migrating the `databricks_cluster` resource to the Go SDK introduced a regression in the handling of the `workload_type` attribute, which controls the use cases a cluster can be used for. By default, the Go SDK does not serialize falsy values (e.g. `jobs = false` in `workload_type`) when sending requests to the REST API, so such settings were dropped from the request. This PR fixes the regression by setting `ForceSendFields` on the nested `Clients` structure whenever `workload_type` is specified.

The intended behavior is:

```
# (no workload_type block)
# -> usable in both jobs and notebooks

workload_type {
  clients {
    jobs      = true
    notebooks = true
  }
}
# -> usable in both jobs and notebooks

workload_type {
  clients {
    jobs = false
  }
}
# -> not usable in jobs because explicitly disabled; usable in notebooks because omitted and true by default
```
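For context, the snippet below is a minimal, self-contained sketch of why the `false` values were being dropped. The `clients` struct is a simplified stand-in for the SDK's generated type, not its actual definition:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the SDK's generated struct: with `omitempty`,
// a false boolean is omitted from the JSON payload entirely.
type clients struct {
	Jobs      bool `json:"jobs,omitempty"`
	Notebooks bool `json:"notebooks,omitempty"`
}

func main() {
	payload, _ := json.Marshal(clients{Jobs: false, Notebooks: true})
	// Prints {"notebooks":true}: the API never sees "jobs": false and
	// falls back to its default of allowing jobs.
	fmt.Println(string(payload))
}
```

The SDK's `ForceSendFields` slice, which it consults when serializing requests, is the escape hatch for exactly this case: listing `"Jobs"` and `"Notebooks"` there forces the zero values to be sent, which is what `SetForceSendFieldsForCluster` now does.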
Fixes #3689.

## Tests
Added an integration test to verify this behavior. While the test was paused after disabling usage in jobs & notebooks, I verified that the cluster could not be attached to a notebook.

- [ ] `make test` run locally
- [ ] relevant change in `docs/` folder
- [ ] covered with integration tests in `internal/acceptance`
- [ ] relevant acceptance tests are passing
- [ ] using Go SDK
---
 clusters/resource_cluster.go        | 22 ++++++++--
 internal/acceptance/cluster_test.go | 67 +++++++++++++++++++++++++++++
 internal/acceptance/init_test.go    |  2 +-
 3 files changed, 86 insertions(+), 5 deletions(-)

diff --git a/clusters/resource_cluster.go b/clusters/resource_cluster.go
index de7332fbcb..6595b92a9f 100644
--- a/clusters/resource_cluster.go
+++ b/clusters/resource_cluster.go
@@ -270,14 +270,31 @@ func FixInstancePoolChangeIfAny(d *schema.ResourceData, cluster any) error {
 func SetForceSendFieldsForCluster(cluster any, d *schema.ResourceData) error {
 	switch c := cluster.(type) {
 	case *compute.ClusterSpec:
+		// Used in jobs.
 		if c.Autoscale == nil {
 			c.ForceSendFields = append(c.ForceSendFields, "NumWorkers")
 		}
+		// Workload type is not relevant in jobs clusters.
 		return nil
 	case *compute.CreateCluster:
 		if c.Autoscale == nil {
 			c.ForceSendFields = append(c.ForceSendFields, "NumWorkers")
 		}
+		// If workload type is set by the user, the fields within Clients should always be sent.
+		// These default to true if not set.
+		if c.WorkloadType != nil {
+			c.WorkloadType.Clients.ForceSendFields = []string{"Jobs", "Notebooks"}
+		}
+		return nil
+	case *compute.EditCluster:
+		if c.Autoscale == nil {
+			c.ForceSendFields = append(c.ForceSendFields, "NumWorkers")
+		}
+		// If workload type is set by the user, the fields within Clients should always be sent.
+		// These default to true if not set.
+		if c.WorkloadType != nil {
+			c.WorkloadType.Clients.ForceSendFields = []string{"Jobs", "Notebooks"}
+		}
 		return nil
 	default:
 		return fmt.Errorf(unsupportedExceptCreateEditClusterSpecErr, cluster, "*", "*", "*")
@@ -626,10 +643,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, c *commo
 			Autoscale: cluster.Autoscale,
 		})
 	} else {
-		if err != nil {
-			return err
-		}
-		cluster.ForceSendFields = []string{"NumWorkers"}
+		SetForceSendFieldsForCluster(&cluster, d)
 
 		err = retry.RetryContext(ctx, 15*time.Minute, func() *retry.RetryError {
 			_, err = clusters.Edit(ctx, cluster)
diff --git a/internal/acceptance/cluster_test.go b/internal/acceptance/cluster_test.go
index 45749a8a40..4dad55c9f7 100644
--- a/internal/acceptance/cluster_test.go
+++ b/internal/acceptance/cluster_test.go
@@ -3,6 +3,8 @@ package acceptance
 import (
 	"fmt"
 	"testing"
+
+	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 )
 
 func TestAccClusterResource_CreateClusterWithLibraries(t *testing.T) {
@@ -129,3 +131,68 @@ func TestAccClusterResource_CreateAndNoWait(t *testing.T) {
 		}`,
 	})
 }
+
+func TestAccClusterResource_WorkloadType(t *testing.T) {
+	workspaceLevel(t, step{
+		Template: testAccClusterResourceWorkloadTypeTemplate(""),
+	}, step{
+		Template: testAccClusterResourceWorkloadTypeTemplate(`
+		workload_type {
+			clients {
+				jobs = true
+				notebooks = true
+			}
+		}`),
+		Check: resource.ComposeAggregateTestCheckFunc(
+			resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "true"),
+			resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "true"),
+		),
+	}, step{
+		Template: testAccClusterResourceWorkloadTypeTemplate(`
+		workload_type {
+			clients {
+				jobs = false
+				notebooks = false
+			}
+		}`),
+		Check: resource.ComposeAggregateTestCheckFunc(
+			resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "false"),
+			resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "false"),
+		),
+	}, step{
+		Template: testAccClusterResourceWorkloadTypeTemplate(`
+		workload_type {
+			clients { }
+		}`),
+		Check: resource.ComposeAggregateTestCheckFunc(
+			resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "true"),
+			resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "true"),
+		),
+	}, step{
+		Template: testAccClusterResourceWorkloadTypeTemplate(``),
+		Check: resource.ComposeAggregateTestCheckFunc(
+			resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.jobs", "true"),
+			resource.TestCheckResourceAttr("databricks_cluster.this", "workload_type.0.clients.0.notebooks", "true"),
+		),
+	})
+}
+
+func testAccClusterResourceWorkloadTypeTemplate(workloadType string) string {
+	return fmt.Sprintf(`
+data "databricks_spark_version" "latest" {}
+resource "databricks_cluster" "this" {
+	cluster_name = "workload-{var.RANDOM}"
+	spark_version = data.databricks_spark_version.latest.id
+	instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}"
+	autotermination_minutes = 10
+	num_workers = 0
+	spark_conf = {
+		"spark.databricks.cluster.profile" = "singleNode"
+		"spark.master" = "local[*]"
+	}
+	custom_tags = {
+		"ResourceClass" = "SingleNode"
+	}
+	%s
+}`, workloadType)
+}
diff --git a/internal/acceptance/init_test.go b/internal/acceptance/init_test.go
index 9bcbb8efab..fb13d01567 100644
--- a/internal/acceptance/init_test.go
+++ b/internal/acceptance/init_test.go
@@ -166,7 +166,7 @@ func run(t *testing.T, steps []step) {
 			if s.Template != "" {
 				stepConfig = environmentTemplate(t, s.Template, vars)
 			}
-			stepNum := i
+			stepNum := i + 1
 			thisStep := s
 			stepCheck := thisStep.Check
 			stepPreConfig := s.PreConfig