Commit

Merge branch 'main' into issue-3855
mgyucht committed Aug 14, 2024
2 parents fbf84e1 + 81be591 commit 4547d07
Showing 24 changed files with 3,116 additions and 2,112 deletions.
19 changes: 18 additions & 1 deletion clusters/resource_cluster.go
@@ -2,15 +2,18 @@ package clusters

import (
	"context"
+	"errors"
	"fmt"
	"log"
	"strings"
	"time"

	"github.com/hashicorp/go-cty/cty"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"

+	"github.com/databricks/databricks-sdk-go/apierr"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/terraform-provider-databricks/common"
	"github.com/databricks/terraform-provider-databricks/libraries"
@@ -604,7 +607,21 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, c *commo
			return err
		}
		cluster.ForceSendFields = []string{"NumWorkers"}
-		_, err = clusters.Edit(ctx, cluster)
+
+		err = retry.RetryContext(ctx, 15*time.Minute, func() *retry.RetryError {
+			_, err = clusters.Edit(ctx, cluster)
+			if err == nil {
+				return nil
+			}
+			var apiErr *apierr.APIError
+			// Only Running and Terminated clusters can be modified. In particular, autoscaling clusters
+			// cannot be modified while a resize is ongoing, so we retry in that case; resizing can take
+			// several minutes.
+			if errors.As(err, &apiErr) && apiErr.ErrorCode == "INVALID_STATE" {
+				return retry.RetryableError(fmt.Errorf("cluster %s cannot be modified in its current state", clusterId))
+			}
+			return retry.NonRetryableError(err)
+		})
+
	}
	if err != nil {
		return err
96 changes: 96 additions & 0 deletions clusters/resource_cluster_test.go
@@ -965,6 +965,102 @@ func TestResourceClusterUpdate(t *testing.T) {
	assert.Equal(t, "abc", d.Id(), "Id should be the same as in reading")
}

func TestResourceClusterUpdate_WhileScaling(t *testing.T) {
	d, err := qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
			{
				Method:       "GET",
				Resource:     "/api/2.1/clusters/get?cluster_id=abc",
				ReuseRequest: true,
				Response: compute.ClusterDetails{
					ClusterId:              "abc",
					NumWorkers:             100,
					ClusterName:            "Shared Autoscaling",
					SparkVersion:           "7.1-scala12",
					NodeTypeId:             "i3.xlarge",
					AutoterminationMinutes: 15,
					State:                  compute.StateRunning,
				},
			},
			{
				Method:   "POST",
				Resource: "/api/2.1/clusters/events",
				ExpectedRequest: compute.GetEvents{
					ClusterId:  "abc",
					Limit:      1,
					Order:      compute.GetEventsOrderDesc,
					EventTypes: []compute.EventType{compute.EventTypePinned, compute.EventTypeUnpinned},
				},
				Response: compute.GetEventsResponse{
					Events:     []compute.ClusterEvent{},
					TotalCount: 0,
				},
			},
			{
				Method:   "POST",
				Resource: "/api/2.1/clusters/start",
				ExpectedRequest: compute.StartCluster{
					ClusterId: "abc",
				},
			},
			{
				Method:   "GET",
				Resource: "/api/2.0/libraries/cluster-status?cluster_id=abc",
				Response: compute.ClusterLibraryStatuses{
					LibraryStatuses: []compute.LibraryFullStatus{},
				},
			},
			{
				Method:   "POST",
				Resource: "/api/2.1/clusters/edit",
				ExpectedRequest: compute.ClusterDetails{
					AutoterminationMinutes: 15,
					ClusterId:              "abc",
					NumWorkers:             100,
					ClusterName:            "Shared Autoscaling",
					SparkVersion:           "7.1-scala12",
					NodeTypeId:             "i3.xlarge",
				},
				Response: common.APIErrorBody{
					ErrorCode: "INVALID_STATE",
				},
				Status: 404,
			},
			{
				Method:   "POST",
				Resource: "/api/2.1/clusters/edit",
				ExpectedRequest: compute.ClusterDetails{
					AutoterminationMinutes: 15,
					ClusterId:              "abc",
					NumWorkers:             100,
					ClusterName:            "Shared Autoscaling",
					SparkVersion:           "7.1-scala12",
					NodeTypeId:             "i3.xlarge",
				},
			},
			{
				Method:   "GET",
				Resource: "/api/2.0/libraries/cluster-status?cluster_id=abc",
				Response: compute.ClusterLibraryStatuses{
					LibraryStatuses: []compute.LibraryFullStatus{},
				},
			},
		},
		ID:       "abc",
		Update:   true,
		Resource: ResourceCluster(),
		State: map[string]any{
			"autotermination_minutes": 15,
			"cluster_name":            "Shared Autoscaling",
			"spark_version":           "7.1-scala12",
			"node_type_id":            "i3.xlarge",
			"num_workers":             100,
		},
	}.Apply(t)
	assert.NoError(t, err)
	assert.Equal(t, "abc", d.Id(), "Id should be the same as in reading")
}

func TestResourceClusterUpdateWithPinned(t *testing.T) {
	d, err := qa.ResourceFixture{
		Fixtures: []qa.HTTPFixture{
12 changes: 9 additions & 3 deletions docs/guides/experimental-exporter.md
@@ -125,6 +125,7 @@ Services are just logical groups of resources used for filtering and organizatio
* `pools` - **listing** [instance pools](../resources/instance_pool.md).
* `repos` - **listing** [databricks_repo](../resources/repo.md)
* `secrets` - **listing** [databricks_secret_scope](../resources/secret_scope.md) along with [keys](../resources/secret.md) and [ACLs](../resources/secret_acl.md).
* `settings` - **listing** [databricks_notification_destination](../resources/notification_destination.md).
* `sql-alerts` - **listing** [databricks_sql_alert](../resources/sql_alert.md).
* `sql-dashboards` - **listing** [databricks_sql_dashboard](../resources/sql_dashboard.md) along with associated [databricks_sql_widget](../resources/sql_widget.md) and [databricks_sql_visualization](../resources/sql_visualization.md).
* `sql-endpoints` - **listing** [databricks_sql_endpoint](../resources/sql_endpoint.md) along with [databricks_sql_global_config](../resources/sql_global_config.md).
@@ -145,6 +146,7 @@ Services are just logical groups of resources used for filtering and organizatio
* `uc-tables` - **listing** (*we can't list directly, only via dependencies to top-level object*) [databricks_sql_table](../resources/sql_table.md) resource.
* `uc-volumes` - **listing** (*we can't list directly, only via dependencies to top-level object*) [databricks_volume](../resources/volume.md)
* `users` - [databricks_user](../resources/user.md) and [databricks_service_principal](../resources/service_principal.md) are written to their own file because of their number. If you use SCIM provisioning, migrating workspaces is the only use case for importing the `users` service.
* `vector-search` - **listing** [databricks_vector_search_endpoint](../resources/vector_search_endpoint.md) and [databricks_vector_search_index](../resources/vector_search_index.md)
* `workspace` - **listing** [databricks_workspace_conf](../resources/workspace_conf.md) and [databricks_global_init_script](../resources/global_init_script.md)

## Secrets
@@ -177,15 +179,15 @@ Exporter aims to generate HCL code for most of the resources within the Databric
| [databricks_dbfs_file](../resources/dbfs_file.md) | Yes | No | Yes | No |
| [databricks_external_location](../resources/external_location.md) | Yes | Yes | Yes | No |
| [databricks_file](../resources/file.md) | Yes | No | Yes | No |
-| [databricks_global_init_script](../resources/global_init_script.md) | Yes | Yes | Yes | No |
+| [databricks_global_init_script](../resources/global_init_script.md) | Yes | Yes | Yes\*\* | No |
| [databricks_grants](../resources/grants.md) | Yes | No | Yes | No |
| [databricks_group](../resources/group.md) | Yes | No | Yes | Yes |
| [databricks_group_instance_profile](../resources/group_instance_profile.md) | Yes | No | Yes | No |
| [databricks_group_member](../resources/group_member.md) | Yes | No | Yes | Yes |
| [databricks_group_role](../resources/group_role.md) | Yes | No | Yes | Yes |
| [databricks_instance_pool](../resources/instance_pool.md) | Yes | No | Yes | No |
| [databricks_instance_profile](../resources/instance_profile.md) | Yes | No | Yes | No |
-| [databricks_ip_access_list](../resources/ip_access_list.md) | Yes | Yes | Yes | No |
+| [databricks_ip_access_list](../resources/ip_access_list.md) | Yes | Yes | Yes\*\* | No |
| [databricks_job](../resources/job.md) | Yes | No | Yes | No |
| [databricks_library](../resources/library.md) | Yes\* | No | Yes | No |
| [databricks_metastore](../resources/metastore.md) | Yes | Yes | No | Yes |
@@ -196,6 +198,7 @@ Exporter aims to generate HCL code for most of the resources within the Databric
| [databricks_model_serving](../resources/model_serving) | Yes | Yes | Yes | No |
| [databricks_mws_permission_assignment](../resources/mws_permission_assignment.md) | Yes | No | No | Yes |
| [databricks_notebook](../resources/notebook.md) | Yes | Yes | Yes | No |
| [databricks_notification_destination](../resources/notification_destination.md) | Yes | No | Yes\*\* | No |
| [databricks_obo_token](../resources/obo_token.md) | Not Applicable | No | No | No |
| [databricks_online_table](../resources/online_table.md) | Yes | Yes | Yes | No |
| [databricks_permissions](../resources/permissions.md) | Yes | No | Yes | No |
@@ -225,11 +228,14 @@ Exporter aims to generate HCL code for most of the resources within the Databric
| [databricks_user](../resources/user.md) | Yes | No | Yes | Yes |
| [databricks_user_instance_profile](../resources/user_instance_profile.md) | No | No | No | No |
| [databricks_user_role](../resources/user_role.md) | Yes | No | Yes | Yes |
| [databricks_vector_search_endpoint](../resources/vector_search_endpoint.md) | Yes | No | Yes | No |
| [databricks_vector_search_index](../resources/vector_search_index.md) | Yes | No | Yes | No |
| [databricks_volume](../resources/volume.md) | Yes | Yes | Yes | No |
| [databricks_workspace_binding](../resources/workspace_binding.md) | Yes | No | Yes | No |
-| [databricks_workspace_conf](../resources/workspace_conf.md) | Yes (partial) | No | Yes | No |
+| [databricks_workspace_conf](../resources/workspace_conf.md) | Yes (partial) | No | Yes\*\* | No |
| [databricks_workspace_file](../resources/workspace_file.md) | Yes | Yes | Yes | No |

Notes:

* \* - libraries are exported as blocks inside the cluster definition instead of generating `databricks_library` resources, to reduce the number of generated resources; see the sketch after this list.
* \*\* - requires workspace admin permission.
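
For illustration, an exported cluster with one attached library might look roughly like the following. This is a hypothetical sketch, not actual exporter output: the resource name and the `pypi` package are made up, and the cluster settings are borrowed from the examples above.

```hcl
resource "databricks_cluster" "shared_autoscaling" {
  cluster_name            = "Shared Autoscaling"
  spark_version           = "7.1-scala12"
  node_type_id            = "i3.xlarge"
  num_workers             = 100
  autotermination_minutes = 15

  # Inlined as a block instead of a separate databricks_library resource.
  library {
    pypi {
      package = "fbprophet==0.6"
    }
  }
}
```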
2 changes: 2 additions & 0 deletions docs/resources/compliance_security_profile_setting.md
@@ -6,6 +6,8 @@ subcategory: "Settings"

-> **Note** This resource can only be used with a workspace-level provider!

-> **Note** This setting can NOT be disabled once it is enabled.

The `databricks_compliance_security_profile_workspace_setting` resource allows you to control whether the compliance
security profile is enabled for the current workspace. It is turned off by default. Enabling it on a workspace is
permanent: the setting can NOT be disabled once it is enabled.
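
Below is a minimal configuration sketch, assuming the nested `compliance_security_profile_workspace` block; the standards listed are illustrative, so check the resource's argument reference for the exact values supported in your workspace:

```hcl
resource "databricks_compliance_security_profile_workspace_setting" "this" {
  compliance_security_profile_workspace {
    is_enabled = true
    # Illustrative values; consult the argument reference for supported standards.
    compliance_standards = ["HIPAA", "PCI_DSS"]
  }
}
```

Remember that applying this with `is_enabled = true` is permanent.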
1 change: 1 addition & 0 deletions docs/resources/notification_destination.md
@@ -97,3 +97,4 @@ The following arguments are supported:
In addition to all arguments above, the following attributes are exported:

* `id` - The unique ID of the Notification Destination.
* `destination_type` - The type of the Notification Destination.
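
As an example, the new attribute can be read back and referenced elsewhere; a short sketch follows (the `email` config block is an illustrative assumption, not part of this change):

```hcl
resource "databricks_notification_destination" "ndresource" {
  display_name = "Notification Destination"
  config {
    email {
      addresses = ["abc@gmail.com"]
    }
  }
}

# For an email destination, destination_type is expected to be "EMAIL".
output "destination_type" {
  value = databricks_notification_destination.ndresource.destination_type
}
```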
81 changes: 77 additions & 4 deletions docs/resources/permissions.md
@@ -291,6 +291,8 @@ resource "databricks_permissions" "dlt_usage" {

Valid [permission levels](https://docs.databricks.com/security/access-control/workspace-acl.html#notebook-permissions) for [databricks_notebook](notebook.md) are: `CAN_READ`, `CAN_RUN`, `CAN_EDIT`, and `CAN_MANAGE`.

A notebook can be specified using either the `notebook_path` or the `notebook_id` attribute. The value of `notebook_id` is the object ID of the notebook in the Databricks workspace, exposed as the `object_id` attribute of the `databricks_notebook` resource, as shown below.

```hcl
resource "databricks_group" "auto" {
  display_name = "Automation"
@@ -306,7 +308,7 @@ resource "databricks_notebook" "this" {
  language = "PYTHON"
}
-resource "databricks_permissions" "notebook_usage" {
+resource "databricks_permissions" "notebook_usage_by_path" {
  notebook_path = databricks_notebook.this.path
  access_control {
Expand All @@ -324,12 +326,35 @@ resource "databricks_permissions" "notebook_usage" {
    permission_level = "CAN_EDIT"
  }
}
resource "databricks_permissions" "notebook_usage_by_id" {
  notebook_id = databricks_notebook.this.object_id
  access_control {
    group_name = "users"
    permission_level = "CAN_READ"
  }
  access_control {
    group_name = databricks_group.auto.display_name
    permission_level = "CAN_RUN"
  }
  access_control {
    group_name = databricks_group.eng.display_name
    permission_level = "CAN_EDIT"
  }
}
```

-> **Note**: When importing a permissions resource, only the `notebook_id` is populated!

## Workspace file usage

Valid permission levels for [databricks_workspace_file](workspace_file.md) are: `CAN_READ`, `CAN_RUN`, `CAN_EDIT`, and `CAN_MANAGE`.

A workspace file can be specified using either the `workspace_file_path` or the `workspace_file_id` attribute. The value of `workspace_file_id` is the object ID of the file in the Databricks workspace, exposed as the `object_id` attribute of the `databricks_workspace_file` resource, as shown below.

```hcl
resource "databricks_group" "auto" {
  display_name = "Automation"
@@ -344,7 +369,7 @@ resource "databricks_workspace_file" "this" {
  path = "/Production/ETL/Features.py"
}
-resource "databricks_permissions" "workspace_file_usage" {
+resource "databricks_permissions" "workspace_file_usage_by_path" {
  workspace_file_path = databricks_workspace_file.this.path
  access_control {
@@ -362,8 +387,29 @@ resource "databricks_permissions" "workspace_file_usage" {
    permission_level = "CAN_EDIT"
  }
}
resource "databricks_permissions" "workspace_file_usage_by_id" {
  workspace_file_id = databricks_workspace_file.this.object_id
  access_control {
    group_name = "users"
    permission_level = "CAN_READ"
  }
  access_control {
    group_name = databricks_group.auto.display_name
    permission_level = "CAN_RUN"
  }
  access_control {
    group_name = databricks_group.eng.display_name
    permission_level = "CAN_EDIT"
  }
}
```

-> **Note**: When importing a permissions resource, only the `workspace_file_id` is populated!

## Folder usage

Valid [permission levels](https://docs.databricks.com/security/access-control/workspace-acl.html#folder-permissions) for folders of [databricks_directory](directory.md) are: `CAN_READ`, `CAN_RUN`, `CAN_EDIT`, and `CAN_MANAGE`. Notebooks and experiments in a folder inherit all permissions settings of that folder. For example, a user (or service principal) that has `CAN_RUN` permission on a folder has `CAN_RUN` permission on the notebooks in that folder.
@@ -373,6 +419,9 @@ Valid [permission levels](https://docs.databricks.com/security/access-control/wo
- All users (or service principals) have `CAN_MANAGE` permission for objects the user creates.
- User home directory - The user (or service principal) has `CAN_MANAGE` permission. All other users (or service principals) can list their directories.

A folder can be specified using either the `directory_path` or the `directory_id` attribute. The value of `directory_id` is the object ID of the directory in the Databricks workspace, exposed as the `object_id` attribute of the `databricks_directory` resource, as shown below.

```hcl
resource "databricks_group" "auto" {
  display_name = "Automation"
@@ -386,9 +435,27 @@ resource "databricks_directory" "this" {
  path = "/Production/ETL"
}
-resource "databricks_permissions" "folder_usage" {
+resource "databricks_permissions" "folder_usage_by_path" {
  directory_path = databricks_directory.this.path
  depends_on = [databricks_directory.this]
  access_control {
    group_name = "users"
    permission_level = "CAN_READ"
  }
  access_control {
    group_name = databricks_group.auto.display_name
    permission_level = "CAN_RUN"
  }
  access_control {
    group_name = databricks_group.eng.display_name
    permission_level = "CAN_EDIT"
  }
}
resource "databricks_permissions" "folder_usage_by_id" {
  directory_id = databricks_directory.this.object_id
  access_control {
    group_name = "users"
@@ -407,6 +474,8 @@ }
}
```

-> **Note**: When importing a permissions resource, only the `directory_id` is populated!

## Repos usage

Valid [permission levels](https://docs.databricks.com/security/access-control/workspace-acl.html) for [databricks_repo](repo.md) are: `CAN_READ`, `CAN_RUN`, `CAN_EDIT`, and `CAN_MANAGE`.
@@ -623,6 +692,10 @@ resource "databricks_permissions" "token_usage" {

[SQL warehouses](https://docs.databricks.com/sql/user/security/access-control/sql-endpoint-acl.html) have four possible permissions: `CAN_USE`, `CAN_MONITOR`, `CAN_MANAGE` and `IS_OWNER`:

- The creator of a warehouse has `IS_OWNER` permission. Destroying the `databricks_permissions` resource for a warehouse reverts ownership to the creator.
- A warehouse must have exactly one owner. If the resource is changed and no owner is specified, the currently authenticated principal becomes the new owner of the warehouse. In practice, nothing changes if the warehouse was created through Terraform by that same principal.
- A warehouse cannot have a group as an owner.

```hcl
data "databricks_current_user" "me" {}