diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ac42672749..9b86f7eb98 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -109,13 +109,58 @@ provider_installation { After installing the necessary software for building provider from sources, you should be able to run `make coverage` to run the tests and see the coverage. -## Package organization for Providers +## Developing Resources or Data Sources using Plugin Framework + +### Package organization for Providers We are migrating the resource from SDKv2 to Plugin Framework provider and hence both of them exist in the codebase. For uniform code convention, readability and development, they are organized in the `internal/providers` directory under root as follows: - `providers`: Contains the changes that `depends` on both internal/providers/sdkv2 and internal/providers/pluginfw packages, eg: `GetProviderServer`. - `common`: Contains the changes `used by` both internal/providers/sdkv2 and internal/providers/pluginfw packages, eg: `ProviderName`. - `pluginfw`: Contains the changes specific to Plugin Framework. This package shouldn't depend on sdkv2 or common. - `sdkv2`: Contains the changes specific to SDKv2. This package shouldn't depend on pluginfw or common. +### Migrating resource to plugin framework +Ideally there shouldn't be any behaviour change when migrating a resource or data source to either Go SDK or Plugin Framework. - Please make sure there are no breaking differences due to changes in schema by running: `make diff-schema`. +- Integration tests shouldn't require any major changes. + + +### Code Organization +Each resource should go into its separate package eg: `volume` package will contain both resource, data sources and other utils specific to volumes. Tests (both unit and integration tests) will also remain in this package. + +Note: Only Docs will stay under root docs/ directory. + + +### Code Conventions +1. 
Make sure the resource or data source implemented is of the right type: + ```golang + var _ resource.ResourceWithConfigure = &QualityMonitorResource{} + var _ datasource.DataSourceWithConfigure = &VolumesDataSource{} + ``` +2. To get the databricks client, `func (*common.DatabricksClient).GetWorkspaceClient()` or `func (*common.DatabricksClient).GetAccountClient()` will be used instead of directly using the underlying `WorkspaceClient()`, `AccountClient()` functions respectively. +3. Any method that returns only diagnostics should be called inline while appending diagnostics in response. Example: + ```golang + resp.Diagnostics.Append(req.Plan.Get(ctx, &monitorInfoTfSDK)...) + if resp.Diagnostics.HasError() { + return + } + ``` + is preferred over the following: + ```golang + diags := req.Plan.Get(ctx, &monitorInfoTfSDK) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return + } + ``` +4. Any method returning an error should directly be followed by appending that to the diagnostics. + ```golang + err := method() + if err != nil { + resp.Diagnostics.AddError("message", err.Error()) + return + } + ``` +5. Any method returning a value alongside Diagnostics should also directly be followed by appending that to the diagnostics. + ## Debugging diff --git a/internal/providers/pluginfw/resources/README.md b/internal/providers/pluginfw/resources/README.md deleted file mode 100644 index 5dc70ff7d0..0000000000 --- a/internal/providers/pluginfw/resources/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Introduction -This document contains the good practices for any new resource or data source that will be introduced or migrated to the plugin framework. - - -## Migrating resource to plugin framework -Ideally there shouldn't be any behaviour change when migrating a resource or data source to either Go SDk or Plugin Framework. - Please make sure there are no breaking differences due to changes in schema by running: `make diff-schema`. 
-- Integration tests shouldn't require any major changes. - - -## Code Organization -Each resource should go into it's separate package eg: `volume` package will contain both resource, data sources and other utils specific to volumes. Tests (both unit and integration tests) will also remain in this package. - -Note: Only Docs will stay under root docs/ directory. - - -## Code Conventions -1. Make sure the resource or data source implemented is of the right type: - ```golang - var _ resource.ResourceWithConfigure = &QualityMonitorResource{} - var _ datasource.DataSourceWithConfigure = &VolumesDataSource{} - ``` -2. To get the databricks client, `func (*common.DatabricksClient).GetWorkspaceClient()` or `func (*common.DatabricksClient).GetAccountClient()` will be used instead of directly using the underlying `WorkspaceClient()`, `AccountClient()` functions respectively. -3. Any method that returns the diagnostics should be called inline while appending diagnostics in response. Example: - ```golang - resp.Diagnostics.Append(req.Plan.Get(ctx, &monitorInfoTfSDK)...) - if resp.Diagnostics.HasError() { - return - } - ``` - is preferred over the following: - ```golang - diags := req.Plan.Get(ctx, &monitorInfoTfSDK) - if diags.HasError() { - resp.Diagnostics.Append(diags...) - return - } - ``` -4. Any method returning an error should directly be followed by appending that to the diagnostics. 
\ No newline at end of file diff --git a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go index 7eba8d5c2a..4bbe242f99 100644 --- a/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go +++ b/internal/providers/pluginfw/resources/qualitymonitor/resource_quality_monitor.go @@ -29,6 +29,28 @@ func ResourceQualityMonitor() resource.Resource { return &QualityMonitorResource{} } +func waitForMonitor2(ctx context.Context, w *databricks.WorkspaceClient, monitor *catalog.MonitorInfo) diag.Diagnostics { + err := retry.RetryContext(ctx, qualityMonitorDefaultProvisionTimeout, func() *retry.RetryError { + newMonitor, err := w.QualityMonitors.GetByTableName(ctx, monitor.TableName) + *monitor = *newMonitor + if err != nil { + return retry.NonRetryableError(err) + } + + switch newMonitor.Status { + case catalog.MonitorInfoStatusMonitorStatusActive: + return nil + case catalog.MonitorInfoStatusMonitorStatusError, catalog.MonitorInfoStatusMonitorStatusFailed: + return retry.NonRetryableError(fmt.Errorf("monitor status returned %s for monitor: %s", newMonitor.Status, newMonitor.TableName)) + } + return retry.RetryableError(fmt.Errorf("monitor %s is still pending", newMonitor.TableName)) + }) + if err != nil { + return diag.Diagnostics{diag.NewErrorDiagnostic("failed to get monitor", err.Error())} + } + return nil +} + func waitForMonitor(ctx context.Context, w *databricks.WorkspaceClient, monitor *catalog.MonitorInfo) diag.Diagnostics { err := retry.RetryContext(ctx, qualityMonitorDefaultProvisionTimeout, func() *retry.RetryError { newMonitor, err := w.QualityMonitors.GetByTableName(ctx, monitor.TableName) @@ -139,6 +161,9 @@ func (r *QualityMonitorResource) Read(ctx context.Context, req resource.ReadRequ } endpoint, err := w.QualityMonitors.GetByTableName(ctx, getMonitor.TableName.ValueString()) if err != nil { + if apierr.IsMissing(err) { + 
resp.State.RemoveResource(ctx) + } resp.Diagnostics.AddError("failed to get monitor", err.Error()) return } diff --git a/internal/providers/pluginfw/resources/volume/data_volumes.go b/internal/providers/pluginfw/resources/volume/data_volumes.go index 1541426b20..764fa1b4e0 100644 --- a/internal/providers/pluginfw/resources/volume/data_volumes.go +++ b/internal/providers/pluginfw/resources/volume/data_volumes.go @@ -2,7 +2,9 @@ package volume import ( "context" + "fmt" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/terraform-provider-databricks/common" pluginfwcommon "github.com/databricks/terraform-provider-databricks/internal/providers/pluginfw/common" @@ -61,7 +63,10 @@ func (d *VolumesDataSource) Read(ctx context.Context, req datasource.ReadRequest converters.TfSdkToGoSdkStruct(ctx, volumesList, &listVolumesRequest) volumes, err := w.Volumes.ListAll(ctx, listVolumesRequest) if err != nil { - resp.Diagnostics.AddError("Failed to get volumes for the catalog and schema", err.Error()) + if apierr.IsMissing(err) { + resp.State.RemoveResource(ctx) + } + resp.Diagnostics.AddError(fmt.Sprintf("Failed to get volumes for the catalog:%s and schema:%s", listVolumesRequest.CatalogName, listVolumesRequest.SchemaName), err.Error()) return } for _, v := range volumes {