Commit 5735002

Merge branch 'main' into divyansh_notification_destinations

Divyansh-db committed Jul 24, 2024
2 parents ff7fdd1 + 01be651
Showing 30 changed files with 174 additions and 152 deletions.
19 changes: 19 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,24 @@
# Version changelog

## 1.49.1

### Bug Fixes
* Fixed reading of permissions for SQL objects ([#3800](https://github.com/databricks/terraform-provider-databricks/pull/3800)).
* Don't update `databricks_metastore` during creation if not required ([#3783](https://github.com/databricks/terraform-provider-databricks/pull/3783)).

### Documentation
* Clarified schedule block in `databricks_job` ([#3805](https://github.com/databricks/terraform-provider-databricks/pull/3805)).
* Used correct names for the isolation mode of storage credentials and external locations ([#3804](https://github.com/databricks/terraform-provider-databricks/pull/3804)).
* Fixed an incomplete note in the `databricks_workspace_binding` resource ([#3806](https://github.com/databricks/terraform-provider-databricks/pull/3806)).

### Internal Changes
* Refactored `databricks_zones` and `databricks_spark_versions` data sources to Go SDK ([#3687](https://github.com/databricks/terraform-provider-databricks/pull/3687)).

### Exporter
* Added support for exporting Lakeview dashboards ([#3779](https://github.com/databricks/terraform-provider-databricks/pull/3779)).
* Added more retries for SCIM API calls ([#3807](https://github.com/databricks/terraform-provider-databricks/pull/3807)).


## 1.49.0

### New Features and Improvements
8 changes: 4 additions & 4 deletions access/resource_sql_permissions_test.go
@@ -184,7 +184,7 @@ var createHighConcurrencyCluster = []qa.HTTPFixture{
{
Method: "GET",
ReuseRequest: true,
Resource: "/api/2.0/clusters/spark-versions",
Resource: "/api/2.1/clusters/spark-versions",
Response: compute.GetSparkVersionsResponse{
Versions: []compute.SparkVersion{
{
@@ -197,7 +197,7 @@
{
Method: "GET",
ReuseRequest: true,
Resource: "/api/2.0/clusters/list-node-types",
Resource: "/api/2.1/clusters/list-node-types",
Response: compute.ListNodeTypesResponse{
NodeTypes: []compute.NodeType{
{
@@ -261,7 +261,7 @@ var createSharedCluster = []qa.HTTPFixture{
{
Method: "GET",
ReuseRequest: true,
Resource: "/api/2.0/clusters/spark-versions",
Resource: "/api/2.1/clusters/spark-versions",
Response: compute.GetSparkVersionsResponse{
Versions: []compute.SparkVersion{
{
@@ -274,7 +274,7 @@
{
Method: "GET",
ReuseRequest: true,
Resource: "/api/2.0/clusters/list-node-types",
Resource: "/api/2.1/clusters/list-node-types",
Response: compute.ListNodeTypesResponse{
NodeTypes: []compute.NodeType{
{
2 changes: 1 addition & 1 deletion catalog/bindings/bindings.go
@@ -8,7 +8,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func AddCurrentWorkspaceBindings(ctx context.Context, d *schema.ResourceData, w *databricks.WorkspaceClient, securableName string, securableType string) error {
func AddCurrentWorkspaceBindings(ctx context.Context, d *schema.ResourceData, w *databricks.WorkspaceClient, securableName string, securableType catalog.UpdateBindingsSecurableType) error {
if d.Get("isolation_mode") != "ISOLATED" && d.Get("isolation_mode") != "ISOLATION_MODE_ISOLATED" {
return nil
}
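For reference, the helper's last parameter is now the Go SDK's typed `catalog.UpdateBindingsSecurableType` rather than a free-form string. The sketch below is illustrative only: it assumes the SDK's `service/catalog` import path and that the enum is string-backed, as the plain type conversion in `catalog/resource_workspace_binding.go` later in this diff suggests.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	// UpdateBindingsSecurableType is assumed to be a string-backed enum, so a
	// plain type conversion is enough to go from the provider's securable_type
	// string to the typed value the helper now expects.
	for _, s := range []string{"catalog", "external_location", "storage_credential"} {
		fmt.Println(catalog.UpdateBindingsSecurableType(s))
	}
}
```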
4 changes: 2 additions & 2 deletions catalog/resource_catalog.go
@@ -100,7 +100,7 @@ func ResourceCatalog() common.Resource {
}

// Bind the current workspace if the catalog is isolated, otherwise the read will fail
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, ci.Name, "catalog")
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, ci.Name, catalog.UpdateBindingsSecurableTypeCatalog)
},
Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
@@ -166,7 +166,7 @@ func ResourceCatalog() common.Resource {
d.SetId(ci.Name)

// Bind the current workspace if the catalog is isolated, otherwise the read will fail
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, ci.Name, "catalog")
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, ci.Name, catalog.UpdateBindingsSecurableTypeCatalog)
},
Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
4 changes: 2 additions & 2 deletions catalog/resource_external_location.go
@@ -75,7 +75,7 @@ func ResourceExternalLocation() common.Resource {
}

// Bind the current workspace if the external location is isolated, otherwise the read will fail
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, el.Name, "external-location")
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, el.Name, catalog.UpdateBindingsSecurableTypeExternalLocation)
},
Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
w, err := c.WorkspaceClient()
@@ -134,7 +134,7 @@ func ResourceExternalLocation() common.Resource {
return err
}
// Bind the current workspace if the external location is isolated, otherwise the read will fail
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, updateExternalLocationRequest.Name, "external-location")
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, updateExternalLocationRequest.Name, catalog.UpdateBindingsSecurableTypeExternalLocation)
},
Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
force := d.Get("force_destroy").(bool)
2 changes: 1 addition & 1 deletion catalog/resource_external_location_test.go
@@ -92,7 +92,7 @@ func TestCreateIsolatedExternalLocation(t *testing.T) {
}, nil)
w.GetMockWorkspaceBindingsAPI().EXPECT().UpdateBindings(mock.Anything, catalog.UpdateWorkspaceBindingsParameters{
SecurableName: "abc",
SecurableType: "external-location",
SecurableType: "external_location",
Add: []catalog.WorkspaceBinding{
{
WorkspaceId: int64(123456789101112),
6 changes: 3 additions & 3 deletions catalog/resource_sql_table_test.go
@@ -1211,7 +1211,7 @@ func TestResourceSqlTableCreateTable_ExistingSQLWarehouse(t *testing.T) {
WarehouseId: "existingwarehouse",
OnWaitTimeout: sql.ExecuteStatementRequestOnWaitTimeoutCancel,
},
Response: sql.ExecuteStatementResponse{
Response: sql.StatementResponse{
StatementId: "statement1",
Status: &sql.StatementStatus{
State: "SUCCEEDED",
@@ -1247,7 +1247,7 @@ var baseClusterFixture = []qa.HTTPFixture{
{
Method: "GET",
ReuseRequest: true,
Resource: "/api/2.0/clusters/spark-versions",
Resource: "/api/2.1/clusters/spark-versions",
Response: compute.GetSparkVersionsResponse{
Versions: []compute.SparkVersion{
{
@@ -1264,7 +1264,7 @@
{
Method: "GET",
ReuseRequest: true,
Resource: "/api/2.0/clusters/list-node-types",
Resource: "/api/2.1/clusters/list-node-types",
Response: compute.ListNodeTypesResponse{
NodeTypes: []compute.NodeType{
{
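The statement-execution fixture's response type changes from `sql.ExecuteStatementResponse` to `sql.StatementResponse`; the fields used here are unchanged. Below is a minimal, illustrative sketch of the same payload under the new name, assuming the SDK's `service/sql` import path.

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/sql"
)

func main() {
	// Same fixture payload as in the test above, under the renamed type.
	resp := sql.StatementResponse{
		StatementId: "statement1",
		Status:      &sql.StatementStatus{State: "SUCCEEDED"},
	}
	fmt.Println(resp.StatementId, resp.Status.State)
}
```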
4 changes: 2 additions & 2 deletions catalog/resource_storage_credential.go
@@ -110,7 +110,7 @@ func ResourceStorageCredential() common.Resource {
return err
}
// Bind the current workspace if the storage credential is isolated, otherwise the read will fail
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, storageCredential.Name, "storage-credential")
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, storageCredential.Name, catalog.UpdateBindingsSecurableTypeStorageCredential)
})
},
Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
@@ -246,7 +246,7 @@ func ResourceStorageCredential() common.Resource {
return err
}
// Bind the current workspace if the storage credential is isolated, otherwise the read will fail
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, update.Name, "storage-credential")
return bindings.AddCurrentWorkspaceBindings(ctx, d, w, update.Name, catalog.UpdateBindingsSecurableTypeStorageCredential)
})
},
Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
2 changes: 1 addition & 1 deletion catalog/resource_storage_credential_test.go
@@ -106,7 +106,7 @@ func TestCreateIsolatedStorageCredential(t *testing.T) {
}, nil)
w.GetMockWorkspaceBindingsAPI().EXPECT().UpdateBindings(mock.Anything, catalog.UpdateWorkspaceBindingsParameters{
SecurableName: "a",
SecurableType: "storage-credential",
SecurableType: "storage_credential",
Add: []catalog.WorkspaceBinding{
{
WorkspaceId: int64(123456789101112),
8 changes: 4 additions & 4 deletions catalog/resource_workspace_binding.go
@@ -43,7 +43,7 @@ func ResourceWorkspaceBinding() common.Resource {
Optional: true,
Default: "catalog",
}
common.CustomizeSchemaPath(m, "securable_type").SetValidateFunc(validation.StringInSlice([]string{"catalog", "external-location", "storage-credential"}, false))
common.CustomizeSchemaPath(m, "securable_type").SetValidateFunc(validation.StringInSlice([]string{"catalog", "external_location", "storage_credential"}, false))
common.CustomizeSchemaPath(m, "binding_type").SetDefault(catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite).SetValidateFunc(validation.StringInSlice([]string{
string(catalog.WorkspaceBindingBindingTypeBindingTypeReadWrite),
string(catalog.WorkspaceBindingBindingTypeBindingTypeReadOnly),
@@ -69,7 +69,7 @@
var update catalog.WorkspaceBinding
common.DataToStructPointer(d, workspaceBindingSchema, &update)
securableName := getSecurableName(d)
securableType := d.Get("securable_type").(string)
securableType := catalog.UpdateBindingsSecurableType(d.Get("securable_type").(string))
_, err = w.WorkspaceBindings.UpdateBindings(ctx, catalog.UpdateWorkspaceBindingsParameters{
Add: []catalog.WorkspaceBinding{update},
SecurableName: securableName,
@@ -88,7 +88,7 @@
return fmt.Errorf("incorrect binding id: %s. Correct format: <workspace_id>|<securable_type>|<securable_name>", d.Id())
}
securableName := parts[2]
securableType := parts[1]
securableType := catalog.GetBindingsSecurableType(parts[1])
workspaceId, err := strconv.ParseInt(parts[0], 10, 0)
if err != nil {
return fmt.Errorf("can't parse workspace_id: %w", err)
@@ -117,7 +117,7 @@ func ResourceWorkspaceBinding() common.Resource {
_, err = w.WorkspaceBindings.UpdateBindings(ctx, catalog.UpdateWorkspaceBindingsParameters{
Remove: []catalog.WorkspaceBinding{update},
SecurableName: getSecurableName(d),
SecurableType: d.Get("securable_type").(string),
SecurableType: catalog.UpdateBindingsSecurableType(d.Get("securable_type").(string)),
})
return err
},
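The import/read ID keeps the `<workspace_id>|<securable_type>|<securable_name>` format; only the securable type is now converted to the SDK enum via `catalog.GetBindingsSecurableType(parts[1])`. The self-contained sketch below illustrates that parsing step; the `parseBindingID` helper is hypothetical and not part of the provider.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseBindingID is an illustrative stand-in for the resource's Read logic:
// it splits "<workspace_id>|<securable_type>|<securable_name>" and validates
// the workspace ID, leaving the securable type as a string that the provider
// then converts with catalog.GetBindingsSecurableType.
func parseBindingID(id string) (workspaceID int64, securableType, securableName string, err error) {
	parts := strings.Split(id, "|")
	if len(parts) != 3 {
		return 0, "", "", fmt.Errorf("incorrect binding id: %s. Correct format: <workspace_id>|<securable_type>|<securable_name>", id)
	}
	workspaceID, err = strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return 0, "", "", fmt.Errorf("can't parse workspace_id: %w", err)
	}
	return workspaceID, parts[1], parts[2], nil
}

func main() {
	fmt.Println(parseBindingID("1234567890101112|external_location|some_location"))
}
```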
6 changes: 3 additions & 3 deletions catalog/resource_workspace_binding_test.go
@@ -167,7 +167,7 @@ func TestSecurableWorkspaceBindings_CreateExtLocation(t *testing.T) {
},
},
SecurableName: "external_location",
SecurableType: "external-location",
SecurableType: catalog.UpdateBindingsSecurableTypeExternalLocation,
}).Return(&catalog.WorkspaceBindingsResponse{
Bindings: []catalog.WorkspaceBinding{
{
@@ -176,7 +176,7 @@
},
},
}, nil)
e.GetBindingsBySecurableTypeAndSecurableName(mock.Anything, "external-location", "external_location").Return(&catalog.WorkspaceBindingsResponse{
e.GetBindingsBySecurableTypeAndSecurableName(mock.Anything, catalog.GetBindingsSecurableTypeExternalLocation, "external_location").Return(&catalog.WorkspaceBindingsResponse{
Bindings: []catalog.WorkspaceBinding{
{
WorkspaceId: int64(1234567890101112),
@@ -188,7 +188,7 @@
Create: true,
HCL: `
securable_name = "external_location"
securable_type = "external-location"
securable_type = "external_location"
workspace_id = "1234567890101112"
`,
}.ApplyNoError(t)
22 changes: 11 additions & 11 deletions clusters/clusters_api_sdk_test.go
@@ -14,15 +14,15 @@ func TestStartClusterAndGetInfo_Pending(t *testing.T) {
client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
{
Method: "GET",
Resource: "/api/2.0/clusters/get?cluster_id=abc",
Resource: "/api/2.1/clusters/get?cluster_id=abc",
Response: ClusterInfo{
State: ClusterStatePending,
ClusterID: "abc",
},
},
{
Method: "GET",
Resource: "/api/2.0/clusters/get?cluster_id=abc",
Resource: "/api/2.1/clusters/get?cluster_id=abc",
Response: ClusterInfo{
State: ClusterStateRunning,
ClusterID: "abc",
@@ -46,30 +46,30 @@ func TestStartClusterAndGetInfo_Terminating(t *testing.T) {
client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
{
Method: "GET",
Resource: "/api/2.0/clusters/get?cluster_id=abc",
Resource: "/api/2.1/clusters/get?cluster_id=abc",
Response: ClusterInfo{
State: ClusterStateTerminating,
ClusterID: "abc",
},
},
{
Method: "GET",
Resource: "/api/2.0/clusters/get?cluster_id=abc",
Resource: "/api/2.1/clusters/get?cluster_id=abc",
Response: ClusterInfo{
State: ClusterStateTerminated,
ClusterID: "abc",
},
},
{
Method: "POST",
Resource: "/api/2.0/clusters/start",
Resource: "/api/2.1/clusters/start",
ExpectedRequest: ClusterID{
ClusterID: "abc",
},
},
{
Method: "GET",
Resource: "/api/2.0/clusters/get?cluster_id=abc",
Resource: "/api/2.1/clusters/get?cluster_id=abc",
Response: ClusterInfo{
State: ClusterStateRunning,
ClusterID: "abc",
@@ -93,22 +93,22 @@ func TestStartClusterAndGetInfo_Error(t *testing.T) {
client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
{
Method: "GET",
Resource: "/api/2.0/clusters/get?cluster_id=abc",
Resource: "/api/2.1/clusters/get?cluster_id=abc",
Response: ClusterInfo{
State: ClusterStateError,
StateMessage: "I am a teapot",
},
},
{
Method: "POST",
Resource: "/api/2.0/clusters/start",
Resource: "/api/2.1/clusters/start",
ExpectedRequest: ClusterID{
ClusterID: "abc",
},
},
{
Method: "GET",
Resource: "/api/2.0/clusters/get?cluster_id=abc",
Resource: "/api/2.1/clusters/get?cluster_id=abc",
Response: ClusterInfo{
State: ClusterStateRunning,
ClusterID: "abc",
@@ -132,15 +132,15 @@ func TestStartClusterAndGetInfo_StartingError(t *testing.T) {
client, server, err := qa.HttpFixtureClient(t, []qa.HTTPFixture{
{
Method: "GET",
Resource: "/api/2.0/clusters/get?cluster_id=abc",
Resource: "/api/2.1/clusters/get?cluster_id=abc",
Response: ClusterInfo{
State: ClusterStateError,
StateMessage: "I am a teapot",
},
},
{
Method: "POST",
Resource: "/api/2.0/clusters/start",
Resource: "/api/2.1/clusters/start",
ExpectedRequest: ClusterID{
ClusterID: "abc",
},
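The cluster endpoints mocked in this file (and in `clusters/clusters_api_test.go` below) move from `/api/2.0/clusters/*` to `/api/2.1/clusters/*`. The sketch below shows one such fixture under the new endpoint; it assumes the provider's internal `qa.HTTPFixture` helper, its import path, and the `ClusterInfo` type used throughout this test file.

```go
package clusters

import "github.com/databricks/terraform-provider-databricks/qa"

// Illustrative only: a reusable fixture mocking the 2.1 "get cluster" call
// for a running cluster, in the same shape as the fixtures above.
var getRunningClusterFixture = []qa.HTTPFixture{
	{
		Method:       "GET",
		ReuseRequest: true,
		Resource:     "/api/2.1/clusters/get?cluster_id=abc",
		Response: ClusterInfo{
			State:     ClusterStateRunning,
			ClusterID: "abc",
		},
	},
}
```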
4 changes: 2 additions & 2 deletions clusters/clusters_api_test.go
@@ -27,7 +27,7 @@ func TestGetOrCreateRunningCluster_AzureAuth(t *testing.T) {
{
Method: "GET",
ReuseRequest: true,
Resource: "/api/2.0/clusters/spark-versions",
Resource: "/api/2.1/clusters/spark-versions",
Response: compute.GetSparkVersionsResponse{
Versions: []compute.SparkVersion{
{
@@ -52,7 +52,7 @@
{
Method: "GET",
ReuseRequest: true,
Resource: "/api/2.0/clusters/list-node-types",
Resource: "/api/2.1/clusters/list-node-types",
Response: compute.ListNodeTypesResponse{
NodeTypes: []compute.NodeType{
{
(Remaining changed files not shown.)
