From a6e54e13db43b40bdb62b79428dae5c6de7d1ebe Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Thu, 4 Jul 2024 14:27:07 +0200 Subject: [PATCH 1/3] Add codegen template for tfsdk struct (#3713) * Add codegen template for tfsdk struct * updated map and list * - * remove tf type for list and map * comments * added test again --- .codegen.json | 3 + .codegen/model.go.tmpl | 85 + .gitattributes | 18 + go.mod | 1 + go.sum | 2 + service/billing_tf/model.go | 540 ++++ service/catalog_tf/model.go | 4224 ++++++++++++++++++++++++++++++ service/compute_tf/model.go | 3936 ++++++++++++++++++++++++++++ service/dashboards_tf/model.go | 443 ++++ service/files_tf/model.go | 266 ++ service/iam_tf/model.go | 1068 ++++++++ service/jobs_tf/model.go | 3466 ++++++++++++++++++++++++ service/marketplace_tf/model.go | 1619 ++++++++++++ service/ml_tf/model.go | 2257 ++++++++++++++++ service/oauth2_tf/model.go | 246 ++ service/pipelines_tf/model.go | 1279 +++++++++ service/provisioning_tf/model.go | 1103 ++++++++ service/serving_tf/model.go | 1670 ++++++++++++ service/settings_tf/model.go | 1493 +++++++++++ service/sharing_tf/model.go | 1042 ++++++++ service/sql_tf/model.go | 3144 ++++++++++++++++++++++ service/vectorsearch_tf/model.go | 641 +++++ service/workspace_tf/model.go | 931 +++++++ 23 files changed, 29477 insertions(+) create mode 100644 .codegen/model.go.tmpl create mode 100755 service/billing_tf/model.go create mode 100755 service/catalog_tf/model.go create mode 100755 service/compute_tf/model.go create mode 100755 service/dashboards_tf/model.go create mode 100755 service/files_tf/model.go create mode 100755 service/iam_tf/model.go create mode 100755 service/jobs_tf/model.go create mode 100755 service/marketplace_tf/model.go create mode 100755 service/ml_tf/model.go create mode 100755 service/oauth2_tf/model.go create mode 100755 service/pipelines_tf/model.go create mode 100755 service/provisioning_tf/model.go create mode 100755 service/serving_tf/model.go create mode 100755 service/settings_tf/model.go create mode 100755 service/sharing_tf/model.go create mode 100755 service/sql_tf/model.go create mode 100755 service/vectorsearch_tf/model.go create mode 100755 service/workspace_tf/model.go diff --git a/.codegen.json b/.codegen.json index fd308f7ef7..6d8bf0496f 100644 --- a/.codegen.json +++ b/.codegen.json @@ -1,5 +1,8 @@ { "formatter": "make fmt", + "packages": { + ".codegen/model.go.tmpl": "service/{{.Name}}_tf/model.go" + }, "version": { "common/version.go": "version = \"$VERSION\"" }, diff --git a/.codegen/model.go.tmpl b/.codegen/model.go.tmpl new file mode 100644 index 0000000000..5271402f02 --- /dev/null +++ b/.codegen/model.go.tmpl @@ -0,0 +1,85 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. 
+*/ + +package {{.Name}}_tf + +import ( + {{range .ImportedPackages}} + "github.com/databricks/databricks-sdk-go/service/{{.}}"{{end}} + "github.com/databricks/databricks-sdk-go/service/{{.Name}}" + "io" + "github.com/databricks/databricks-sdk-go/marshal" + "github.com/hashicorp/terraform-plugin-framework/types" +) +{{range .Types}} +{{- if or .Fields .IsEmpty}} +{{.Comment "// " 80}} +type {{.PascalName}} struct { + {{- range .Fields}} + {{.Comment " // " 80}} + {{.PascalName}} {{if .IsOptionalObject}}*{{end}}{{template "type" .Entity}} `{{template "field-tag" . }}`{{end}} +} + +{{else if .MapValue}}{{.Comment "// " 80}} +type {{.PascalName}} {{template "type" .}} +{{else if .Enum}}{{.Comment "// " 80}} +type {{.PascalName}} string +{{range .Enum }} +{{.Comment "// " 80}} +const {{.Entity.PascalName}}{{.PascalName}} {{.Entity.PascalName}} = `{{.Content}}`{{end}} + +// String representation for [fmt.Print] +func (f *{{.PascalName}}) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *{{.PascalName}}) Set(v string) error { + switch v { + case {{range $i, $e := .Enum }}{{if $i}}, {{end}}`{{.Content}}`{{end}}: + *f = {{.PascalName}}(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of {{range $i, $e := .Enum }}{{if $i}}, {{end}}"{{.Content}}"{{end}}`, v) + } +} + +// Type always returns {{.PascalName}} to satisfy [pflag.Value] interface +func (f *{{.PascalName}}) Type() string { + return "{{.PascalName}}" +} +{{end}} +{{end}} + +{{- define "field-tag" -}} + {{if .IsJson}}tfsdk:"{{.Name}}"{{else}}tfsdk:"-"{{end -}} + {{if .IsPath}} url:"-"{{end -}} + {{if .IsHeader}} url:"-" header:"{{.Name}}{{if not .Required}},omitempty{{end}}"{{end -}} + {{if .IsQuery}} url:"{{.Name}}{{if not .Required}},omitempty{{end}}"{{end -}} +{{- end -}} + +{{- define "type" -}} + {{- if not . 
}}any /* ERROR */ + {{- else if .IsExternal }}{{.Package.Name}}.{{.PascalName}} + {{- else if .IsAny}}any + {{- else if .IsEmpty}}{{.PascalName}} + {{- else if .IsString}}types.String + {{- else if .IsBool}}types.Bool + {{- else if .IsInt64}}types.Int64 + {{- else if .IsFloat64}}types.Float64 + {{- else if .IsInt}}types.Int64 + {{- else if .IsByteStream}}io.ReadCloser + {{- else if .ArrayValue }}[]{{template "type" .ArrayValue}} + {{- else if .MapValue }}map[string]{{template "type" .MapValue}} + {{- else if .IsObject }}{{.PascalName}} + {{- else if .Enum }}{{.PascalName}} + {{- else}}any /* MISSING TYPE */ + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index e69de29bb2..d804f8e73e 100755 --- a/.gitattributes +++ b/.gitattributes @@ -0,0 +1,18 @@ +service/billing_tf/model.go linguist-generated=true +service/catalog_tf/model.go linguist-generated=true +service/compute_tf/model.go linguist-generated=true +service/dashboards_tf/model.go linguist-generated=true +service/files_tf/model.go linguist-generated=true +service/iam_tf/model.go linguist-generated=true +service/jobs_tf/model.go linguist-generated=true +service/marketplace_tf/model.go linguist-generated=true +service/ml_tf/model.go linguist-generated=true +service/oauth2_tf/model.go linguist-generated=true +service/pipelines_tf/model.go linguist-generated=true +service/provisioning_tf/model.go linguist-generated=true +service/serving_tf/model.go linguist-generated=true +service/settings_tf/model.go linguist-generated=true +service/sharing_tf/model.go linguist-generated=true +service/sql_tf/model.go linguist-generated=true +service/vectorsearch_tf/model.go linguist-generated=true +service/workspace_tf/model.go linguist-generated=true diff --git a/go.mod b/go.mod index 5a56fb699e..436c0fad28 100644 --- a/go.mod +++ b/go.mod @@ -48,6 +48,7 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect + github.com/hashicorp/terraform-plugin-framework v1.9.0 // indirect github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect diff --git a/go.sum b/go.sum index d2e63b3423..a3cec01721 100644 --- a/go.sum +++ b/go.sum @@ -130,6 +130,8 @@ github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVW github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= +github.com/hashicorp/terraform-plugin-framework v1.9.0 h1:caLcDoxiRucNi2hk8+j3kJwkKfvHznubyFsJMWfZqKU= +github.com/hashicorp/terraform-plugin-framework v1.9.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= diff --git a/service/billing_tf/model.go b/service/billing_tf/model.go new file mode 100755 index 0000000000..529f490b3c --- /dev/null +++ b/service/billing_tf/model.go @@ -0,0 +1,540 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. 
DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package billing_tf + +import ( + "fmt" + "io" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Budget configuration to be created. +type Budget struct { + Alerts []BudgetAlert `tfsdk:"alerts"` + // Optional end date of the budget. + EndDate types.String `tfsdk:"end_date"` + // SQL-like filter expression with workspaceId, SKU and tag. Usage in your + // account that matches this expression will be counted in this budget. + // + // Supported properties on left-hand side of comparison: * `workspaceId` - + // the ID of the workspace * `sku` - SKU of the cluster, e.g. + // `STANDARD_ALL_PURPOSE_COMPUTE` * `tag.tagName`, `tag.'tag name'` - tag of + // the cluster + // + // Supported comparison operators: * `=` - equal * `!=` - not equal + // + // Supported logical operators: `AND`, `OR`. + // + // Examples: * `workspaceId=123 OR (sku='STANDARD_ALL_PURPOSE_COMPUTE' AND + // tag.'my tag'='my value')` * `workspaceId!=456` * + // `sku='STANDARD_ALL_PURPOSE_COMPUTE' OR sku='PREMIUM_ALL_PURPOSE_COMPUTE'` + // * `tag.name1='value1' AND tag.name2='value2'` + Filter types.String `tfsdk:"filter"` + // Human-readable name of the budget. + Name types.String `tfsdk:"name"` + // Period length in years, months, weeks and/or days. Examples: `1 month`, + // `30 days`, `1 year, 2 months, 1 week, 2 days` + Period types.String `tfsdk:"period"` + // Start date of the budget period calculation. + StartDate types.String `tfsdk:"start_date"` + // Target amount of the budget per period in USD. + TargetAmount types.String `tfsdk:"target_amount"` +} + +type BudgetAlert struct { + // List of email addresses to be notified when budget percentage is exceeded + // in the given period. + EmailNotifications []types.String `tfsdk:"email_notifications"` + // Percentage of the target amount used in the currect period that will + // trigger a notification. + MinPercentage types.Int64 `tfsdk:"min_percentage"` +} + +// List of budgets. +type BudgetList struct { + Budgets []BudgetWithStatus `tfsdk:"budgets"` +} + +// Budget configuration with daily status. +type BudgetWithStatus struct { + Alerts []BudgetAlert `tfsdk:"alerts"` + + BudgetId types.String `tfsdk:"budget_id"` + + CreationTime types.String `tfsdk:"creation_time"` + // Optional end date of the budget. + EndDate types.String `tfsdk:"end_date"` + // SQL-like filter expression with workspaceId, SKU and tag. Usage in your + // account that matches this expression will be counted in this budget. + // + // Supported properties on left-hand side of comparison: * `workspaceId` - + // the ID of the workspace * `sku` - SKU of the cluster, e.g. + // `STANDARD_ALL_PURPOSE_COMPUTE` * `tag.tagName`, `tag.'tag name'` - tag of + // the cluster + // + // Supported comparison operators: * `=` - equal * `!=` - not equal + // + // Supported logical operators: `AND`, `OR`. 
+ // + // Examples: * `workspaceId=123 OR (sku='STANDARD_ALL_PURPOSE_COMPUTE' AND + // tag.'my tag'='my value')` * `workspaceId!=456` * + // `sku='STANDARD_ALL_PURPOSE_COMPUTE' OR sku='PREMIUM_ALL_PURPOSE_COMPUTE'` + // * `tag.name1='value1' AND tag.name2='value2'` + Filter types.String `tfsdk:"filter"` + // Human-readable name of the budget. + Name types.String `tfsdk:"name"` + // Period length in years, months, weeks and/or days. Examples: `1 month`, + // `30 days`, `1 year, 2 months, 1 week, 2 days` + Period types.String `tfsdk:"period"` + // Start date of the budget period calculation. + StartDate types.String `tfsdk:"start_date"` + // Amount used in the budget for each day (noncumulative). + StatusDaily []BudgetWithStatusStatusDailyItem `tfsdk:"status_daily"` + // Target amount of the budget per period in USD. + TargetAmount types.String `tfsdk:"target_amount"` + + UpdateTime types.String `tfsdk:"update_time"` +} + +type BudgetWithStatusStatusDailyItem struct { + // Amount used in this day in USD. + Amount types.String `tfsdk:"amount"` + + Date types.String `tfsdk:"date"` +} + +type CreateLogDeliveryConfigurationParams struct { + // The optional human-readable name of the log delivery configuration. + // Defaults to empty. + ConfigName types.String `tfsdk:"config_name"` + // The ID for a method:credentials/create that represents the AWS IAM role + // with policy and trust relationship as described in the main billable + // usage documentation page. See [Configure billable usage delivery]. + // + // [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + CredentialsId types.String `tfsdk:"credentials_id"` + // The optional delivery path prefix within Amazon S3 storage. Defaults to + // empty, which means that logs are delivered to the root of the bucket. + // This must be a valid S3 object key. This must not start or end with a + // slash character. + DeliveryPathPrefix types.String `tfsdk:"delivery_path_prefix"` + // This field applies only if `log_type` is `BILLABLE_USAGE`. This is the + // optional start month and year for delivery, specified in `YYYY-MM` + // format. Defaults to current year and month. `BILLABLE_USAGE` logs are not + // available for usage before March 2019 (`2019-03`). + DeliveryStartTime types.String `tfsdk:"delivery_start_time"` + // Log delivery type. Supported values are: + // + // * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the + // CSV schema, see the [View billable usage]. + // + // * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, + // see [Configure audit logging] + // + // [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + // [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + LogType LogType `tfsdk:"log_type"` + // The file type of log delivery. + // + // * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the + // CSV (comma-separated values) format is supported. For the schema, see the + // [View billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be + // `JSON`. Only the JSON (JavaScript Object Notation) format is supported. 
+ // For the schema, see the [Configuring audit logs]. + // + // [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + OutputFormat OutputFormat `tfsdk:"output_format"` + // Status of log delivery configuration. Set to `ENABLED` (enabled) or + // `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable + // the configuration](#operation/patch-log-delivery-config-status) later. + // Deletion of a configuration is not supported, so disable a log delivery + // configuration that is no longer needed. + Status LogDeliveryConfigStatus `tfsdk:"status"` + // The ID for a method:storage/create that represents the S3 bucket with + // bucket policy as described in the main billable usage documentation page. + // See [Configure billable usage delivery]. + // + // [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + StorageConfigurationId types.String `tfsdk:"storage_configuration_id"` + // Optional filter that specifies workspace IDs to deliver logs for. By + // default the workspace filter is empty and log delivery applies at the + // account level, delivering workspace-level logs for all workspaces in your + // account, plus account level logs. You can optionally set this field to an + // array of workspace IDs (each one is an `int64`) to which log delivery + // should apply, in which case only workspace-level logs relating to the + // specified workspaces are delivered. If you plan to use different log + // delivery configurations for different workspaces, set this field + // explicitly. Be aware that delivery configurations mentioning specific + // workspaces won't apply to new workspaces created in the future, and + // delivery won't include account level logs. For some types of Databricks + // deployments there is only one workspace per account ID, so this field is + // unnecessary. + WorkspaceIdsFilter []types.Int64 `tfsdk:"workspace_ids_filter"` +} + +// Delete budget +type DeleteBudgetRequest struct { + // Budget ID + BudgetId types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// The status string for log delivery. Possible values are: * `CREATED`: There +// were no log delivery attempts since the config was created. * `SUCCEEDED`: +// The latest attempt of log delivery has succeeded completely. * +// `USER_FAILURE`: The latest attempt of log delivery failed because of +// misconfiguration of customer provided permissions on role or storage. * +// `SYSTEM_FAILURE`: The latest attempt of log delivery failed because of an +// Databricks internal error. Contact support if it doesn't go away soon. * +// `NOT_FOUND`: The log delivery status as the configuration has been disabled +// since the release of this feature or there are no workspaces in the account. +type DeliveryStatus string + +// There were no log delivery attempts since the config was created. +const DeliveryStatusCreated DeliveryStatus = `CREATED` + +// The log delivery status as the configuration has been disabled since the +// release of this feature or there are no workspaces in the account. +const DeliveryStatusNotFound DeliveryStatus = `NOT_FOUND` + +// The latest attempt of log delivery has succeeded completely. +const DeliveryStatusSucceeded DeliveryStatus = `SUCCEEDED` + +// The latest attempt of log delivery failed because of an internal +// error. 
Contact support if it doesn't go away soon. +const DeliveryStatusSystemFailure DeliveryStatus = `SYSTEM_FAILURE` + +// The latest attempt of log delivery failed because of misconfiguration of +// customer provided permissions on role or storage. +const DeliveryStatusUserFailure DeliveryStatus = `USER_FAILURE` + +// String representation for [fmt.Print] +func (f *DeliveryStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeliveryStatus) Set(v string) error { + switch v { + case `CREATED`, `NOT_FOUND`, `SUCCEEDED`, `SYSTEM_FAILURE`, `USER_FAILURE`: + *f = DeliveryStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CREATED", "NOT_FOUND", "SUCCEEDED", "SYSTEM_FAILURE", "USER_FAILURE"`, v) + } +} + +// Type always returns DeliveryStatus to satisfy [pflag.Value] interface +func (f *DeliveryStatus) Type() string { + return "DeliveryStatus" +} + +// Return billable usage logs +type DownloadRequest struct { + // Format: `YYYY-MM`. Last month to return billable usage logs for. This + // field is required. + EndMonth types.String `tfsdk:"-" url:"end_month"` + // Specify whether to include personally identifiable information in the + // billable usage logs, for example the email addresses of cluster creators. + // Handle this information with care. Defaults to false. + PersonalData types.Bool `tfsdk:"-" url:"personal_data,omitempty"` + // Format: `YYYY-MM`. First month to return billable usage logs for. This + // field is required. + StartMonth types.String `tfsdk:"-" url:"start_month"` +} + +type DownloadResponse struct { + Contents io.ReadCloser `tfsdk:"-"` +} + +// Get budget and its status +type GetBudgetRequest struct { + // Budget ID + BudgetId types.String `tfsdk:"-" url:"-"` +} + +// Get log delivery configuration +type GetLogDeliveryRequest struct { + // Databricks log delivery configuration ID + LogDeliveryConfigurationId types.String `tfsdk:"-" url:"-"` +} + +// Get all log delivery configurations +type ListLogDeliveryRequest struct { + // Filter by credential configuration ID. + CredentialsId types.String `tfsdk:"-" url:"credentials_id,omitempty"` + // Filter by status `ENABLED` or `DISABLED`. + Status LogDeliveryConfigStatus `tfsdk:"-" url:"status,omitempty"` + // Filter by storage configuration ID. + StorageConfigurationId types.String `tfsdk:"-" url:"storage_configuration_id,omitempty"` +} + +// Status of log delivery configuration. Set to `ENABLED` (enabled) or +// `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable the +// configuration](#operation/patch-log-delivery-config-status) later. Deletion +// of a configuration is not supported, so disable a log delivery configuration +// that is no longer needed. 
+type LogDeliveryConfigStatus string + +const LogDeliveryConfigStatusDisabled LogDeliveryConfigStatus = `DISABLED` + +const LogDeliveryConfigStatusEnabled LogDeliveryConfigStatus = `ENABLED` + +// String representation for [fmt.Print] +func (f *LogDeliveryConfigStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *LogDeliveryConfigStatus) Set(v string) error { + switch v { + case `DISABLED`, `ENABLED`: + *f = LogDeliveryConfigStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DISABLED", "ENABLED"`, v) + } +} + +// Type always returns LogDeliveryConfigStatus to satisfy [pflag.Value] interface +func (f *LogDeliveryConfigStatus) Type() string { + return "LogDeliveryConfigStatus" +} + +type LogDeliveryConfiguration struct { + // The Databricks account ID that hosts the log delivery configuration. + AccountId types.String `tfsdk:"account_id"` + // Databricks log delivery configuration ID. + ConfigId types.String `tfsdk:"config_id"` + // The optional human-readable name of the log delivery configuration. + // Defaults to empty. + ConfigName types.String `tfsdk:"config_name"` + // Time in epoch milliseconds when the log delivery configuration was + // created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // The ID for a method:credentials/create that represents the AWS IAM role + // with policy and trust relationship as described in the main billable + // usage documentation page. See [Configure billable usage delivery]. + // + // [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + CredentialsId types.String `tfsdk:"credentials_id"` + // The optional delivery path prefix within Amazon S3 storage. Defaults to + // empty, which means that logs are delivered to the root of the bucket. + // This must be a valid S3 object key. This must not start or end with a + // slash character. + DeliveryPathPrefix types.String `tfsdk:"delivery_path_prefix"` + // This field applies only if `log_type` is `BILLABLE_USAGE`. This is the + // optional start month and year for delivery, specified in `YYYY-MM` + // format. Defaults to current year and month. `BILLABLE_USAGE` logs are not + // available for usage before March 2019 (`2019-03`). + DeliveryStartTime types.String `tfsdk:"delivery_start_time"` + // Databricks log delivery status. + LogDeliveryStatus *LogDeliveryStatus `tfsdk:"log_delivery_status"` + // Log delivery type. Supported values are: + // + // * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the + // CSV schema, see the [View billable usage]. + // + // * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, + // see [Configure audit logging] + // + // [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + // [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + LogType LogType `tfsdk:"log_type"` + // The file type of log delivery. + // + // * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the + // CSV (comma-separated values) format is supported. 
For the schema, see the + // [View billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be + // `JSON`. Only the JSON (JavaScript Object Notation) format is supported. + // For the schema, see the [Configuring audit logs]. + // + // [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + // [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + OutputFormat OutputFormat `tfsdk:"output_format"` + // Status of log delivery configuration. Set to `ENABLED` (enabled) or + // `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable + // the configuration](#operation/patch-log-delivery-config-status) later. + // Deletion of a configuration is not supported, so disable a log delivery + // configuration that is no longer needed. + Status LogDeliveryConfigStatus `tfsdk:"status"` + // The ID for a method:storage/create that represents the S3 bucket with + // bucket policy as described in the main billable usage documentation page. + // See [Configure billable usage delivery]. + // + // [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + StorageConfigurationId types.String `tfsdk:"storage_configuration_id"` + // Time in epoch milliseconds when the log delivery configuration was + // updated. + UpdateTime types.Int64 `tfsdk:"update_time"` + // Optional filter that specifies workspace IDs to deliver logs for. By + // default the workspace filter is empty and log delivery applies at the + // account level, delivering workspace-level logs for all workspaces in your + // account, plus account level logs. You can optionally set this field to an + // array of workspace IDs (each one is an `int64`) to which log delivery + // should apply, in which case only workspace-level logs relating to the + // specified workspaces are delivered. If you plan to use different log + // delivery configurations for different workspaces, set this field + // explicitly. Be aware that delivery configurations mentioning specific + // workspaces won't apply to new workspaces created in the future, and + // delivery won't include account level logs. For some types of Databricks + // deployments there is only one workspace per account ID, so this field is + // unnecessary. + WorkspaceIdsFilter []types.Int64 `tfsdk:"workspace_ids_filter"` +} + +// Databricks log delivery status. +type LogDeliveryStatus struct { + // The UTC time for the latest log delivery attempt. + LastAttemptTime types.String `tfsdk:"last_attempt_time"` + // The UTC time for the latest successful log delivery. + LastSuccessfulAttemptTime types.String `tfsdk:"last_successful_attempt_time"` + // Informative message about the latest log delivery attempt. If the log + // delivery fails with USER_FAILURE, error details will be provided for + // fixing misconfigurations in cloud permissions. + Message types.String `tfsdk:"message"` + // The status string for log delivery. Possible values are: * `CREATED`: + // There were no log delivery attempts since the config was created. * + // `SUCCEEDED`: The latest attempt of log delivery has succeeded completely. + // * `USER_FAILURE`: The latest attempt of log delivery failed because of + // misconfiguration of customer provided permissions on role or storage. * + // `SYSTEM_FAILURE`: The latest attempt of log delivery failed because of an + // Databricks internal error. Contact support if it doesn't go away soon. 
* + // `NOT_FOUND`: The log delivery status as the configuration has been + // disabled since the release of this feature or there are no workspaces in + // the account. + Status DeliveryStatus `tfsdk:"status"` +} + +// Log delivery type. Supported values are: +// +// * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the CSV +// schema, see the [View billable usage]. +// +// * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, see +// [Configure audit logging] +// +// [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html +// [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html +// [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html +// [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html +type LogType string + +const LogTypeAuditLogs LogType = `AUDIT_LOGS` + +const LogTypeBillableUsage LogType = `BILLABLE_USAGE` + +// String representation for [fmt.Print] +func (f *LogType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *LogType) Set(v string) error { + switch v { + case `AUDIT_LOGS`, `BILLABLE_USAGE`: + *f = LogType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AUDIT_LOGS", "BILLABLE_USAGE"`, v) + } +} + +// Type always returns LogType to satisfy [pflag.Value] interface +func (f *LogType) Type() string { + return "LogType" +} + +// The file type of log delivery. +// +// * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the CSV +// (comma-separated values) format is supported. For the schema, see the [View +// billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be `JSON`. +// Only the JSON (JavaScript Object Notation) format is supported. For the +// schema, see the [Configuring audit logs]. +// +// [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html +// [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html +type OutputFormat string + +const OutputFormatCsv OutputFormat = `CSV` + +const OutputFormatJson OutputFormat = `JSON` + +// String representation for [fmt.Print] +func (f *OutputFormat) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *OutputFormat) Set(v string) error { + switch v { + case `CSV`, `JSON`: + *f = OutputFormat(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CSV", "JSON"`, v) + } +} + +// Type always returns OutputFormat to satisfy [pflag.Value] interface +func (f *OutputFormat) Type() string { + return "OutputFormat" +} + +type PatchStatusResponse struct { +} + +type UpdateLogDeliveryConfigurationStatusRequest struct { + // Databricks log delivery configuration ID + LogDeliveryConfigurationId types.String `tfsdk:"-" url:"-"` + // Status of log delivery configuration. Set to `ENABLED` (enabled) or + // `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable + // the configuration](#operation/patch-log-delivery-config-status) later. + // Deletion of a configuration is not supported, so disable a log delivery + // configuration that is no longer needed. 
+ Status LogDeliveryConfigStatus `tfsdk:"status"` +} + +type UpdateResponse struct { +} + +type WrappedBudget struct { + // Budget configuration to be created. + Budget Budget `tfsdk:"budget"` + // Budget ID + BudgetId types.String `tfsdk:"-" url:"-"` +} + +type WrappedBudgetWithStatus struct { + // Budget configuration with daily status. + Budget BudgetWithStatus `tfsdk:"budget"` +} + +type WrappedCreateLogDeliveryConfiguration struct { + LogDeliveryConfiguration *CreateLogDeliveryConfigurationParams `tfsdk:"log_delivery_configuration"` +} + +type WrappedLogDeliveryConfiguration struct { + LogDeliveryConfiguration *LogDeliveryConfiguration `tfsdk:"log_delivery_configuration"` +} + +type WrappedLogDeliveryConfigurations struct { + LogDeliveryConfigurations []LogDeliveryConfiguration `tfsdk:"log_delivery_configurations"` +} diff --git a/service/catalog_tf/model.go b/service/catalog_tf/model.go new file mode 100755 index 0000000000..384a948364 --- /dev/null +++ b/service/catalog_tf/model.go @@ -0,0 +1,4224 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package catalog_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AccountsCreateMetastore struct { + MetastoreInfo *CreateMetastore `tfsdk:"metastore_info"` +} + +type AccountsCreateMetastoreAssignment struct { + MetastoreAssignment *CreateMetastoreAssignment `tfsdk:"metastore_assignment"` + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` + // Workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type AccountsCreateStorageCredential struct { + CredentialInfo *CreateStorageCredential `tfsdk:"credential_info"` + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` +} + +type AccountsMetastoreAssignment struct { + MetastoreAssignment *MetastoreAssignment `tfsdk:"metastore_assignment"` +} + +type AccountsMetastoreInfo struct { + MetastoreInfo *MetastoreInfo `tfsdk:"metastore_info"` +} + +type AccountsStorageCredentialInfo struct { + CredentialInfo *StorageCredentialInfo `tfsdk:"credential_info"` +} + +type AccountsUpdateMetastore struct { + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` + + MetastoreInfo *UpdateMetastore `tfsdk:"metastore_info"` +} + +type AccountsUpdateMetastoreAssignment struct { + MetastoreAssignment *UpdateMetastoreAssignment `tfsdk:"metastore_assignment"` + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` + // Workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type AccountsUpdateStorageCredential struct { + CredentialInfo *UpdateStorageCredential `tfsdk:"credential_info"` + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` + // Name of the storage credential. + StorageCredentialName types.String `tfsdk:"-" url:"-"` +} + +type ArtifactAllowlistInfo struct { + // A list of allowed artifact match patterns. 
+ ArtifactMatchers []ArtifactMatcher `tfsdk:"artifact_matchers"` + // Time at which this artifact allowlist was set, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of the user who set the artifact allowlist. + CreatedBy types.String `tfsdk:"created_by"` + // Unique identifier of parent metastore. + MetastoreId types.String `tfsdk:"metastore_id"` +} + +type ArtifactMatcher struct { + // The artifact path or maven coordinate + Artifact types.String `tfsdk:"artifact"` + // The pattern matching type of the artifact + MatchType MatchType `tfsdk:"match_type"` +} + +// The artifact type +type ArtifactType string + +const ArtifactTypeInitScript ArtifactType = `INIT_SCRIPT` + +const ArtifactTypeLibraryJar ArtifactType = `LIBRARY_JAR` + +const ArtifactTypeLibraryMaven ArtifactType = `LIBRARY_MAVEN` + +// String representation for [fmt.Print] +func (f *ArtifactType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ArtifactType) Set(v string) error { + switch v { + case `INIT_SCRIPT`, `LIBRARY_JAR`, `LIBRARY_MAVEN`: + *f = ArtifactType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INIT_SCRIPT", "LIBRARY_JAR", "LIBRARY_MAVEN"`, v) + } +} + +// Type always returns ArtifactType to satisfy [pflag.Value] interface +func (f *ArtifactType) Type() string { + return "ArtifactType" +} + +type AssignResponse struct { +} + +type AwsIamRoleRequest struct { + // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access. + RoleArn types.String `tfsdk:"role_arn"` +} + +type AwsIamRoleResponse struct { + // The external ID used in role assumption to prevent confused deputy + // problem.. + ExternalId types.String `tfsdk:"external_id"` + // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access. + RoleArn types.String `tfsdk:"role_arn"` + // The Amazon Resource Name (ARN) of the AWS IAM user managed by Databricks. + // This is the identity that is going to assume the AWS IAM role. + UnityCatalogIamArn types.String `tfsdk:"unity_catalog_iam_arn"` +} + +type AzureManagedIdentityRequest struct { + // The Azure resource ID of the Azure Databricks Access Connector. Use the + // format + // /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}. + AccessConnectorId types.String `tfsdk:"access_connector_id"` + // The Azure resource ID of the managed identity. Use the format + // /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}. + // This is only available for user-assgined identities. For system-assigned + // identities, the access_connector_id is used to identify the identity. If + // this field is not provided, then we assume the AzureManagedIdentity is + // for a system-assigned identity. + ManagedIdentityId types.String `tfsdk:"managed_identity_id"` +} + +type AzureManagedIdentityResponse struct { + // The Azure resource ID of the Azure Databricks Access Connector. Use the + // format + // /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.Databricks/accessConnectors/{connector-name}. + AccessConnectorId types.String `tfsdk:"access_connector_id"` + // The Databricks internal ID that represents this managed identity. + CredentialId types.String `tfsdk:"credential_id"` + // The Azure resource ID of the managed identity. 
Use the format + // /subscriptions/{guid}/resourceGroups/{rg-name}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identity-name}. + // This is only available for user-assgined identities. For system-assigned + // identities, the access_connector_id is used to identify the identity. If + // this field is not provided, then we assume the AzureManagedIdentity is + // for a system-assigned identity. + ManagedIdentityId types.String `tfsdk:"managed_identity_id"` +} + +type AzureServicePrincipal struct { + // The application ID of the application registration within the referenced + // AAD tenant. + ApplicationId types.String `tfsdk:"application_id"` + // The client secret generated for the above app ID in AAD. + ClientSecret types.String `tfsdk:"client_secret"` + // The directory ID corresponding to the Azure Active Directory (AAD) tenant + // of the application. + DirectoryId types.String `tfsdk:"directory_id"` +} + +// Cancel refresh +type CancelRefreshRequest struct { + // ID of the refresh. + RefreshId types.String `tfsdk:"-" url:"-"` + // Full name of the table. + TableName types.String `tfsdk:"-" url:"-"` +} + +type CancelRefreshResponse struct { +} + +type CatalogInfo struct { + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. + BrowseOnly types.Bool `tfsdk:"browse_only"` + // The type of the catalog. + CatalogType CatalogType `tfsdk:"catalog_type"` + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // The name of the connection to an external data source. + ConnectionName types.String `tfsdk:"connection_name"` + // Time at which this catalog was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of catalog creator. + CreatedBy types.String `tfsdk:"created_by"` + + EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag"` + // Whether predictive optimization should be enabled for this object and + // objects under it. + EnablePredictiveOptimization EnablePredictiveOptimization `tfsdk:"enable_predictive_optimization"` + // The full name of the catalog. Corresponds with the name field. + FullName types.String `tfsdk:"full_name"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode CatalogIsolationMode `tfsdk:"isolation_mode"` + // Unique identifier of parent metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // Name of catalog. + Name types.String `tfsdk:"name"` + // A map of key-value properties attached to the securable. + Options map[string]types.String `tfsdk:"options"` + // Username of current owner of catalog. + Owner types.String `tfsdk:"owner"` + // A map of key-value properties attached to the securable. + Properties map[string]types.String `tfsdk:"properties"` + // The name of delta sharing provider. + // + // A Delta Sharing catalog is a catalog that is based on a Delta share on a + // remote sharing server. + ProviderName types.String `tfsdk:"provider_name"` + // Status of an asynchronously provisioned resource. + ProvisioningInfo *ProvisioningInfo `tfsdk:"provisioning_info"` + // Kind of catalog securable. + SecurableKind CatalogInfoSecurableKind `tfsdk:"securable_kind"` + + SecurableType types.String `tfsdk:"securable_type"` + // The name of the share under the share provider. 
+ ShareName types.String `tfsdk:"share_name"` + // Storage Location URL (full path) for managed tables within catalog. + StorageLocation types.String `tfsdk:"storage_location"` + // Storage root URL for managed tables within catalog. + StorageRoot types.String `tfsdk:"storage_root"` + // Time at which this catalog was last modified, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of user who last modified catalog. + UpdatedBy types.String `tfsdk:"updated_by"` +} + +// Kind of catalog securable. +type CatalogInfoSecurableKind string + +const CatalogInfoSecurableKindCatalogDeltasharing CatalogInfoSecurableKind = `CATALOG_DELTASHARING` + +const CatalogInfoSecurableKindCatalogForeignBigquery CatalogInfoSecurableKind = `CATALOG_FOREIGN_BIGQUERY` + +const CatalogInfoSecurableKindCatalogForeignDatabricks CatalogInfoSecurableKind = `CATALOG_FOREIGN_DATABRICKS` + +const CatalogInfoSecurableKindCatalogForeignMysql CatalogInfoSecurableKind = `CATALOG_FOREIGN_MYSQL` + +const CatalogInfoSecurableKindCatalogForeignPostgresql CatalogInfoSecurableKind = `CATALOG_FOREIGN_POSTGRESQL` + +const CatalogInfoSecurableKindCatalogForeignRedshift CatalogInfoSecurableKind = `CATALOG_FOREIGN_REDSHIFT` + +const CatalogInfoSecurableKindCatalogForeignSnowflake CatalogInfoSecurableKind = `CATALOG_FOREIGN_SNOWFLAKE` + +const CatalogInfoSecurableKindCatalogForeignSqldw CatalogInfoSecurableKind = `CATALOG_FOREIGN_SQLDW` + +const CatalogInfoSecurableKindCatalogForeignSqlserver CatalogInfoSecurableKind = `CATALOG_FOREIGN_SQLSERVER` + +const CatalogInfoSecurableKindCatalogInternal CatalogInfoSecurableKind = `CATALOG_INTERNAL` + +const CatalogInfoSecurableKindCatalogStandard CatalogInfoSecurableKind = `CATALOG_STANDARD` + +const CatalogInfoSecurableKindCatalogSystem CatalogInfoSecurableKind = `CATALOG_SYSTEM` + +const CatalogInfoSecurableKindCatalogSystemDeltasharing CatalogInfoSecurableKind = `CATALOG_SYSTEM_DELTASHARING` + +// String representation for [fmt.Print] +func (f *CatalogInfoSecurableKind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CatalogInfoSecurableKind) Set(v string) error { + switch v { + case `CATALOG_DELTASHARING`, `CATALOG_FOREIGN_BIGQUERY`, `CATALOG_FOREIGN_DATABRICKS`, `CATALOG_FOREIGN_MYSQL`, `CATALOG_FOREIGN_POSTGRESQL`, `CATALOG_FOREIGN_REDSHIFT`, `CATALOG_FOREIGN_SNOWFLAKE`, `CATALOG_FOREIGN_SQLDW`, `CATALOG_FOREIGN_SQLSERVER`, `CATALOG_INTERNAL`, `CATALOG_STANDARD`, `CATALOG_SYSTEM`, `CATALOG_SYSTEM_DELTASHARING`: + *f = CatalogInfoSecurableKind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CATALOG_DELTASHARING", "CATALOG_FOREIGN_BIGQUERY", "CATALOG_FOREIGN_DATABRICKS", "CATALOG_FOREIGN_MYSQL", "CATALOG_FOREIGN_POSTGRESQL", "CATALOG_FOREIGN_REDSHIFT", "CATALOG_FOREIGN_SNOWFLAKE", "CATALOG_FOREIGN_SQLDW", "CATALOG_FOREIGN_SQLSERVER", "CATALOG_INTERNAL", "CATALOG_STANDARD", "CATALOG_SYSTEM", "CATALOG_SYSTEM_DELTASHARING"`, v) + } +} + +// Type always returns CatalogInfoSecurableKind to satisfy [pflag.Value] interface +func (f *CatalogInfoSecurableKind) Type() string { + return "CatalogInfoSecurableKind" +} + +// Whether the current securable is accessible from all workspaces or a specific +// set of workspaces. 
+type CatalogIsolationMode string + +const CatalogIsolationModeIsolated CatalogIsolationMode = `ISOLATED` + +const CatalogIsolationModeOpen CatalogIsolationMode = `OPEN` + +// String representation for [fmt.Print] +func (f *CatalogIsolationMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CatalogIsolationMode) Set(v string) error { + switch v { + case `ISOLATED`, `OPEN`: + *f = CatalogIsolationMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ISOLATED", "OPEN"`, v) + } +} + +// Type always returns CatalogIsolationMode to satisfy [pflag.Value] interface +func (f *CatalogIsolationMode) Type() string { + return "CatalogIsolationMode" +} + +// The type of the catalog. +type CatalogType string + +const CatalogTypeDeltasharingCatalog CatalogType = `DELTASHARING_CATALOG` + +const CatalogTypeManagedCatalog CatalogType = `MANAGED_CATALOG` + +const CatalogTypeSystemCatalog CatalogType = `SYSTEM_CATALOG` + +// String representation for [fmt.Print] +func (f *CatalogType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CatalogType) Set(v string) error { + switch v { + case `DELTASHARING_CATALOG`, `MANAGED_CATALOG`, `SYSTEM_CATALOG`: + *f = CatalogType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELTASHARING_CATALOG", "MANAGED_CATALOG", "SYSTEM_CATALOG"`, v) + } +} + +// Type always returns CatalogType to satisfy [pflag.Value] interface +func (f *CatalogType) Type() string { + return "CatalogType" +} + +type CloudflareApiToken struct { + // The Cloudflare access key id of the token. + AccessKeyId types.String `tfsdk:"access_key_id"` + // The account id associated with the API token. + AccountId types.String `tfsdk:"account_id"` + // The secret access token generated for the access key id + SecretAccessKey types.String `tfsdk:"secret_access_key"` +} + +type ColumnInfo struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + + Mask *ColumnMask `tfsdk:"mask"` + // Name of Column. + Name types.String `tfsdk:"name"` + // Whether field may be Null (default: true). + Nullable types.Bool `tfsdk:"nullable"` + // Partition index for column. + PartitionIndex types.Int64 `tfsdk:"partition_index"` + // Ordinal position of column (starting at position 0). + Position types.Int64 `tfsdk:"position"` + // Format of IntervalType. + TypeIntervalType types.String `tfsdk:"type_interval_type"` + // Full data type specification, JSON-serialized. + TypeJson types.String `tfsdk:"type_json"` + // Name of type (INT, STRUCT, MAP, etc.). + TypeName ColumnTypeName `tfsdk:"type_name"` + // Digits of precision; required for DecimalTypes. + TypePrecision types.Int64 `tfsdk:"type_precision"` + // Digits to right of decimal; Required for DecimalTypes. + TypeScale types.Int64 `tfsdk:"type_scale"` + // Full data type specification as SQL/catalogString text. + TypeText types.String `tfsdk:"type_text"` +} + +type ColumnMask struct { + // The full name of the column mask SQL UDF. + FunctionName types.String `tfsdk:"function_name"` + // The list of additional table columns to be passed as input to the column + // mask function. The first arg of the mask function should be of the type + // of the column being masked and the types of the rest of the args should + // match the types of columns in 'using_column_names'. 
+ UsingColumnNames []types.String `tfsdk:"using_column_names"` +} + +// Name of type (INT, STRUCT, MAP, etc.). +type ColumnTypeName string + +const ColumnTypeNameArray ColumnTypeName = `ARRAY` + +const ColumnTypeNameBinary ColumnTypeName = `BINARY` + +const ColumnTypeNameBoolean ColumnTypeName = `BOOLEAN` + +const ColumnTypeNameByte ColumnTypeName = `BYTE` + +const ColumnTypeNameChar ColumnTypeName = `CHAR` + +const ColumnTypeNameDate ColumnTypeName = `DATE` + +const ColumnTypeNameDecimal ColumnTypeName = `DECIMAL` + +const ColumnTypeNameDouble ColumnTypeName = `DOUBLE` + +const ColumnTypeNameFloat ColumnTypeName = `FLOAT` + +const ColumnTypeNameInt ColumnTypeName = `INT` + +const ColumnTypeNameInterval ColumnTypeName = `INTERVAL` + +const ColumnTypeNameLong ColumnTypeName = `LONG` + +const ColumnTypeNameMap ColumnTypeName = `MAP` + +const ColumnTypeNameNull ColumnTypeName = `NULL` + +const ColumnTypeNameShort ColumnTypeName = `SHORT` + +const ColumnTypeNameString ColumnTypeName = `STRING` + +const ColumnTypeNameStruct ColumnTypeName = `STRUCT` + +const ColumnTypeNameTableType ColumnTypeName = `TABLE_TYPE` + +const ColumnTypeNameTimestamp ColumnTypeName = `TIMESTAMP` + +const ColumnTypeNameTimestampNtz ColumnTypeName = `TIMESTAMP_NTZ` + +const ColumnTypeNameUserDefinedType ColumnTypeName = `USER_DEFINED_TYPE` + +// String representation for [fmt.Print] +func (f *ColumnTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`: + *f = ColumnTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE"`, v) + } +} + +// Type always returns ColumnTypeName to satisfy [pflag.Value] interface +func (f *ColumnTypeName) Type() string { + return "ColumnTypeName" +} + +type ConnectionInfo struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Unique identifier of the Connection. + ConnectionId types.String `tfsdk:"connection_id"` + // The type of connection. + ConnectionType ConnectionType `tfsdk:"connection_type"` + // Time at which this connection was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of connection creator. + CreatedBy types.String `tfsdk:"created_by"` + // The type of credential. + CredentialType CredentialType `tfsdk:"credential_type"` + // Full name of connection. + FullName types.String `tfsdk:"full_name"` + // Unique identifier of parent metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // Name of the connection. + Name types.String `tfsdk:"name"` + // A map of key-value properties attached to the securable. + Options map[string]types.String `tfsdk:"options"` + // Username of current owner of the connection. + Owner types.String `tfsdk:"owner"` + // An object containing map of key-value properties attached to the + // connection. + Properties map[string]types.String `tfsdk:"properties"` + // Status of an asynchronously provisioned resource. 
+ ProvisioningInfo *ProvisioningInfo `tfsdk:"provisioning_info"` + // If the connection is read only. + ReadOnly types.Bool `tfsdk:"read_only"` + // Kind of connection securable. + SecurableKind ConnectionInfoSecurableKind `tfsdk:"securable_kind"` + + SecurableType types.String `tfsdk:"securable_type"` + // Time at which this connection was updated, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of user who last modified connection. + UpdatedBy types.String `tfsdk:"updated_by"` + // URL of the remote data source, extracted from options. + Url types.String `tfsdk:"url"` +} + +// Kind of connection securable. +type ConnectionInfoSecurableKind string + +const ConnectionInfoSecurableKindConnectionBigquery ConnectionInfoSecurableKind = `CONNECTION_BIGQUERY` + +const ConnectionInfoSecurableKindConnectionDatabricks ConnectionInfoSecurableKind = `CONNECTION_DATABRICKS` + +const ConnectionInfoSecurableKindConnectionMysql ConnectionInfoSecurableKind = `CONNECTION_MYSQL` + +const ConnectionInfoSecurableKindConnectionOnlineCatalog ConnectionInfoSecurableKind = `CONNECTION_ONLINE_CATALOG` + +const ConnectionInfoSecurableKindConnectionPostgresql ConnectionInfoSecurableKind = `CONNECTION_POSTGRESQL` + +const ConnectionInfoSecurableKindConnectionRedshift ConnectionInfoSecurableKind = `CONNECTION_REDSHIFT` + +const ConnectionInfoSecurableKindConnectionSnowflake ConnectionInfoSecurableKind = `CONNECTION_SNOWFLAKE` + +const ConnectionInfoSecurableKindConnectionSqldw ConnectionInfoSecurableKind = `CONNECTION_SQLDW` + +const ConnectionInfoSecurableKindConnectionSqlserver ConnectionInfoSecurableKind = `CONNECTION_SQLSERVER` + +// String representation for [fmt.Print] +func (f *ConnectionInfoSecurableKind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ConnectionInfoSecurableKind) Set(v string) error { + switch v { + case `CONNECTION_BIGQUERY`, `CONNECTION_DATABRICKS`, `CONNECTION_MYSQL`, `CONNECTION_ONLINE_CATALOG`, `CONNECTION_POSTGRESQL`, `CONNECTION_REDSHIFT`, `CONNECTION_SNOWFLAKE`, `CONNECTION_SQLDW`, `CONNECTION_SQLSERVER`: + *f = ConnectionInfoSecurableKind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CONNECTION_BIGQUERY", "CONNECTION_DATABRICKS", "CONNECTION_MYSQL", "CONNECTION_ONLINE_CATALOG", "CONNECTION_POSTGRESQL", "CONNECTION_REDSHIFT", "CONNECTION_SNOWFLAKE", "CONNECTION_SQLDW", "CONNECTION_SQLSERVER"`, v) + } +} + +// Type always returns ConnectionInfoSecurableKind to satisfy [pflag.Value] interface +func (f *ConnectionInfoSecurableKind) Type() string { + return "ConnectionInfoSecurableKind" +} + +// The type of connection. 
+type ConnectionType string + +const ConnectionTypeBigquery ConnectionType = `BIGQUERY` + +const ConnectionTypeDatabricks ConnectionType = `DATABRICKS` + +const ConnectionTypeMysql ConnectionType = `MYSQL` + +const ConnectionTypePostgresql ConnectionType = `POSTGRESQL` + +const ConnectionTypeRedshift ConnectionType = `REDSHIFT` + +const ConnectionTypeSnowflake ConnectionType = `SNOWFLAKE` + +const ConnectionTypeSqldw ConnectionType = `SQLDW` + +const ConnectionTypeSqlserver ConnectionType = `SQLSERVER` + +// String representation for [fmt.Print] +func (f *ConnectionType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ConnectionType) Set(v string) error { + switch v { + case `BIGQUERY`, `DATABRICKS`, `MYSQL`, `POSTGRESQL`, `REDSHIFT`, `SNOWFLAKE`, `SQLDW`, `SQLSERVER`: + *f = ConnectionType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BIGQUERY", "DATABRICKS", "MYSQL", "POSTGRESQL", "REDSHIFT", "SNOWFLAKE", "SQLDW", "SQLSERVER"`, v) + } +} + +// Type always returns ConnectionType to satisfy [pflag.Value] interface +func (f *ConnectionType) Type() string { + return "ConnectionType" +} + +// Detailed status of an online table. Shown if the online table is in the +// ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. +type ContinuousUpdateStatus struct { + // Progress of the initial data synchronization. + InitialPipelineSyncProgress *PipelineProgress `tfsdk:"initial_pipeline_sync_progress"` + // The last source table Delta version that was synced to the online table. + // Note that this Delta version may not be completely synced to the online + // table yet. + LastProcessedCommitVersion types.Int64 `tfsdk:"last_processed_commit_version"` + // The timestamp of the last time any data was synchronized from the source + // table to the online table. + Timestamp types.String `tfsdk:"timestamp"` +} + +type CreateCatalog struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // The name of the connection to an external data source. + ConnectionName types.String `tfsdk:"connection_name"` + // Name of catalog. + Name types.String `tfsdk:"name"` + // A map of key-value properties attached to the securable. + Options map[string]types.String `tfsdk:"options"` + // A map of key-value properties attached to the securable. + Properties map[string]types.String `tfsdk:"properties"` + // The name of delta sharing provider. + // + // A Delta Sharing catalog is a catalog that is based on a Delta share on a + // remote sharing server. + ProviderName types.String `tfsdk:"provider_name"` + // The name of the share under the share provider. + ShareName types.String `tfsdk:"share_name"` + // Storage root URL for managed tables within catalog. + StorageRoot types.String `tfsdk:"storage_root"` +} + +type CreateConnection struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // The type of connection. + ConnectionType ConnectionType `tfsdk:"connection_type"` + // Name of the connection. + Name types.String `tfsdk:"name"` + // A map of key-value properties attached to the securable. + Options map[string]types.String `tfsdk:"options"` + // An object containing map of key-value properties attached to the + // connection. + Properties map[string]types.String `tfsdk:"properties"` + // If the connection is read only. 
+	ReadOnly types.Bool `tfsdk:"read_only"`
+}
+
+type CreateExternalLocation struct {
+	// The AWS access point to use when accessing s3 for this external location.
+	AccessPoint types.String `tfsdk:"access_point"`
+	// User-provided free-form text description.
+	Comment types.String `tfsdk:"comment"`
+	// Name of the storage credential used with this location.
+	CredentialName types.String `tfsdk:"credential_name"`
+	// Encryption options that apply to clients connecting to cloud storage.
+	EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details"`
+	// Name of the external location.
+	Name types.String `tfsdk:"name"`
+	// Indicates whether the external location is read-only.
+	ReadOnly types.Bool `tfsdk:"read_only"`
+	// Skips validation of the storage credential associated with the external
+	// location.
+	SkipValidation types.Bool `tfsdk:"skip_validation"`
+	// Path URL of the external location.
+	Url types.String `tfsdk:"url"`
+}
+
+type CreateFunction struct {
+	// Name of parent catalog.
+	CatalogName types.String `tfsdk:"catalog_name"`
+	// User-provided free-form text description.
+	Comment types.String `tfsdk:"comment"`
+	// Scalar function return data type.
+	DataType ColumnTypeName `tfsdk:"data_type"`
+	// External function language.
+	ExternalLanguage types.String `tfsdk:"external_language"`
+	// External function name.
+	ExternalName types.String `tfsdk:"external_name"`
+	// Pretty printed function data type.
+	FullDataType types.String `tfsdk:"full_data_type"`
+
+	InputParams FunctionParameterInfos `tfsdk:"input_params"`
+	// Whether the function is deterministic.
+	IsDeterministic types.Bool `tfsdk:"is_deterministic"`
+	// Function null call.
+	IsNullCall types.Bool `tfsdk:"is_null_call"`
+	// Name of function, relative to parent schema.
+	Name types.String `tfsdk:"name"`
+	// Function parameter style. **S** is the value for SQL.
+	ParameterStyle CreateFunctionParameterStyle `tfsdk:"parameter_style"`
+	// JSON-serialized key-value pair map, encoded (escaped) as a string.
+	Properties types.String `tfsdk:"properties"`
+	// Table function return parameters.
+	ReturnParams FunctionParameterInfos `tfsdk:"return_params"`
+	// Function language. When **EXTERNAL** is used, the language of the routine
+	// function should be specified in the __external_language__ field, and the
+	// __return_params__ of the function cannot be used (as **TABLE** return
+	// type is not supported), and the __sql_data_access__ field must be
+	// **NO_SQL**.
+	RoutineBody CreateFunctionRoutineBody `tfsdk:"routine_body"`
+	// Function body.
+	RoutineDefinition types.String `tfsdk:"routine_definition"`
+	// Function dependencies.
+	RoutineDependencies DependencyList `tfsdk:"routine_dependencies"`
+	// Name of parent schema relative to its parent catalog.
+	SchemaName types.String `tfsdk:"schema_name"`
+	// Function security type.
+	SecurityType CreateFunctionSecurityType `tfsdk:"security_type"`
+	// Specific name of the function; Reserved for future use.
+	SpecificName types.String `tfsdk:"specific_name"`
+	// Function SQL data access.
+	SqlDataAccess CreateFunctionSqlDataAccess `tfsdk:"sql_data_access"`
+	// List of schemes whose objects can be referenced without qualification.
+	SqlPath types.String `tfsdk:"sql_path"`
+}
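+
+// Example (illustrative sketch only, not generator output): populating one of
+// the generated tfsdk request structs with terraform-plugin-framework values.
+// The location name, credential name and URL below are placeholders, and the
+// types.StringValue/types.BoolValue constructors are assumed to be available
+// in the vendored terraform-plugin-framework version.
+func exampleCreateExternalLocation() CreateExternalLocation {
+	return CreateExternalLocation{
+		Name:           types.StringValue("example-location"),
+		CredentialName: types.StringValue("example-credential"),
+		Url:            types.StringValue("s3://example-bucket/prefix"),
+		Comment:        types.StringValue("managed by terraform"),
+		ReadOnly:       types.BoolValue(false),
+		SkipValidation: types.BoolValue(true),
+	}
+}
+
+// Function parameter style. **S** is the value for SQL.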
+type CreateFunctionParameterStyle string + +const CreateFunctionParameterStyleS CreateFunctionParameterStyle = `S` + +// String representation for [fmt.Print] +func (f *CreateFunctionParameterStyle) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateFunctionParameterStyle) Set(v string) error { + switch v { + case `S`: + *f = CreateFunctionParameterStyle(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "S"`, v) + } +} + +// Type always returns CreateFunctionParameterStyle to satisfy [pflag.Value] interface +func (f *CreateFunctionParameterStyle) Type() string { + return "CreateFunctionParameterStyle" +} + +type CreateFunctionRequest struct { + // Partial __FunctionInfo__ specifying the function to be created. + FunctionInfo CreateFunction `tfsdk:"function_info"` +} + +// Function language. When **EXTERNAL** is used, the language of the routine +// function should be specified in the __external_language__ field, and the +// __return_params__ of the function cannot be used (as **TABLE** return type is +// not supported), and the __sql_data_access__ field must be **NO_SQL**. +type CreateFunctionRoutineBody string + +const CreateFunctionRoutineBodyExternal CreateFunctionRoutineBody = `EXTERNAL` + +const CreateFunctionRoutineBodySql CreateFunctionRoutineBody = `SQL` + +// String representation for [fmt.Print] +func (f *CreateFunctionRoutineBody) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateFunctionRoutineBody) Set(v string) error { + switch v { + case `EXTERNAL`, `SQL`: + *f = CreateFunctionRoutineBody(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EXTERNAL", "SQL"`, v) + } +} + +// Type always returns CreateFunctionRoutineBody to satisfy [pflag.Value] interface +func (f *CreateFunctionRoutineBody) Type() string { + return "CreateFunctionRoutineBody" +} + +// Function security type. +type CreateFunctionSecurityType string + +const CreateFunctionSecurityTypeDefiner CreateFunctionSecurityType = `DEFINER` + +// String representation for [fmt.Print] +func (f *CreateFunctionSecurityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateFunctionSecurityType) Set(v string) error { + switch v { + case `DEFINER`: + *f = CreateFunctionSecurityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEFINER"`, v) + } +} + +// Type always returns CreateFunctionSecurityType to satisfy [pflag.Value] interface +func (f *CreateFunctionSecurityType) Type() string { + return "CreateFunctionSecurityType" +} + +// Function SQL data access. 
+type CreateFunctionSqlDataAccess string + +const CreateFunctionSqlDataAccessContainsSql CreateFunctionSqlDataAccess = `CONTAINS_SQL` + +const CreateFunctionSqlDataAccessNoSql CreateFunctionSqlDataAccess = `NO_SQL` + +const CreateFunctionSqlDataAccessReadsSqlData CreateFunctionSqlDataAccess = `READS_SQL_DATA` + +// String representation for [fmt.Print] +func (f *CreateFunctionSqlDataAccess) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateFunctionSqlDataAccess) Set(v string) error { + switch v { + case `CONTAINS_SQL`, `NO_SQL`, `READS_SQL_DATA`: + *f = CreateFunctionSqlDataAccess(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CONTAINS_SQL", "NO_SQL", "READS_SQL_DATA"`, v) + } +} + +// Type always returns CreateFunctionSqlDataAccess to satisfy [pflag.Value] interface +func (f *CreateFunctionSqlDataAccess) Type() string { + return "CreateFunctionSqlDataAccess" +} + +type CreateMetastore struct { + // The user-specified name of the metastore. + Name types.String `tfsdk:"name"` + // Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). + // The field can be omitted in the __workspace-level__ __API__ but not in + // the __account-level__ __API__. If this field is omitted, the region of + // the workspace receiving the request will be used. + Region types.String `tfsdk:"region"` + // The storage root URL for metastore + StorageRoot types.String `tfsdk:"storage_root"` +} + +type CreateMetastoreAssignment struct { + // The name of the default catalog in the metastore. + DefaultCatalogName types.String `tfsdk:"default_catalog_name"` + // The unique ID of the metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // A workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type CreateMonitor struct { + // The directory to store monitoring assets (e.g. dashboard, metric tables). + AssetsDir types.String `tfsdk:"assets_dir"` + // Name of the baseline table from which drift metrics are computed from. + // Columns in the monitored table should also be present in the baseline + // table. + BaselineTableName types.String `tfsdk:"baseline_table_name"` + // Custom metrics to compute on the monitored table. These can be aggregate + // metrics, derived metrics (from already computed aggregate metrics), or + // drift metrics (comparing metrics across time windows). + CustomMetrics []MonitorMetric `tfsdk:"custom_metrics"` + // The data classification config for the monitor. + DataClassificationConfig *MonitorDataClassificationConfig `tfsdk:"data_classification_config"` + // Configuration for monitoring inference logs. + InferenceLog *MonitorInferenceLog `tfsdk:"inference_log"` + // The notification settings for the monitor. + Notifications *MonitorNotifications `tfsdk:"notifications"` + // Schema where output metric tables are created. + OutputSchemaName types.String `tfsdk:"output_schema_name"` + // The schedule for automatically updating and refreshing metric tables. + Schedule *MonitorCronSchedule `tfsdk:"schedule"` + // Whether to skip creating a default dashboard summarizing data quality + // metrics. + SkipBuiltinDashboard types.Bool `tfsdk:"skip_builtin_dashboard"` + // List of column expressions to slice data with for targeted analysis. The + // data is grouped by each expression independently, resulting in a separate + // slice for each predicate and its complements. 
For high-cardinality + // columns, only the top 100 unique values by frequency will generate + // slices. + SlicingExprs []types.String `tfsdk:"slicing_exprs"` + // Configuration for monitoring snapshot tables. + Snapshot *MonitorSnapshot `tfsdk:"snapshot"` + // Full name of the table. + TableName types.String `tfsdk:"-" url:"-"` + // Configuration for monitoring time series tables. + TimeSeries *MonitorTimeSeries `tfsdk:"time_series"` + // Optional argument to specify the warehouse for dashboard creation. If not + // specified, the first running warehouse will be used. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +// Online Table information. +type CreateOnlineTableRequest struct { + // Full three-part (catalog, schema, table) name of the table. + Name types.String `tfsdk:"name"` + // Specification of the online table. + Spec *OnlineTableSpec `tfsdk:"spec"` +} + +type CreateRegisteredModelRequest struct { + // The name of the catalog where the schema and the registered model reside + CatalogName types.String `tfsdk:"catalog_name"` + // The comment attached to the registered model + Comment types.String `tfsdk:"comment"` + // The name of the registered model + Name types.String `tfsdk:"name"` + // The name of the schema where the registered model resides + SchemaName types.String `tfsdk:"schema_name"` + // The storage location on the cloud under which model version data files + // are stored + StorageLocation types.String `tfsdk:"storage_location"` +} + +type CreateResponse struct { +} + +type CreateSchema struct { + // Name of parent catalog. + CatalogName types.String `tfsdk:"catalog_name"` + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Name of schema, relative to parent catalog. + Name types.String `tfsdk:"name"` + // A map of key-value properties attached to the securable. + Properties map[string]types.String `tfsdk:"properties"` + // Storage root URL for managed tables within schema. + StorageRoot types.String `tfsdk:"storage_root"` +} + +type CreateStorageCredential struct { + // The AWS IAM role configuration. + AwsIamRole *AwsIamRoleRequest `tfsdk:"aws_iam_role"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentityRequest `tfsdk:"azure_managed_identity"` + // The Azure service principal configuration. + AzureServicePrincipal *AzureServicePrincipal `tfsdk:"azure_service_principal"` + // The Cloudflare API token configuration. + CloudflareApiToken *CloudflareApiToken `tfsdk:"cloudflare_api_token"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment"` + // The Databricks managed GCP service account configuration. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account"` + // The credential name. The name must be unique within the metastore. + Name types.String `tfsdk:"name"` + // Whether the storage credential is only usable for read operations. + ReadOnly types.Bool `tfsdk:"read_only"` + // Supplying true to this argument skips validation of the created + // credential. + SkipValidation types.Bool `tfsdk:"skip_validation"` +} + +type CreateTableConstraint struct { + // A table constraint, as defined by *one* of the following fields being + // set: __primary_key_constraint__, __foreign_key_constraint__, + // __named_table_constraint__. + Constraint TableConstraint `tfsdk:"constraint"` + // The full name of the table referenced by the constraint. 
+ FullNameArg types.String `tfsdk:"full_name_arg"` +} + +type CreateVolumeRequestContent struct { + // The name of the catalog where the schema and the volume are + CatalogName types.String `tfsdk:"catalog_name"` + // The comment attached to the volume + Comment types.String `tfsdk:"comment"` + // The name of the volume + Name types.String `tfsdk:"name"` + // The name of the schema where the volume is + SchemaName types.String `tfsdk:"schema_name"` + // The storage location on the cloud + StorageLocation types.String `tfsdk:"storage_location"` + + VolumeType VolumeType `tfsdk:"volume_type"` +} + +// The type of credential. +type CredentialType string + +const CredentialTypeUsernamePassword CredentialType = `USERNAME_PASSWORD` + +// String representation for [fmt.Print] +func (f *CredentialType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CredentialType) Set(v string) error { + switch v { + case `USERNAME_PASSWORD`: + *f = CredentialType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "USERNAME_PASSWORD"`, v) + } +} + +// Type always returns CredentialType to satisfy [pflag.Value] interface +func (f *CredentialType) Type() string { + return "CredentialType" +} + +// Currently assigned workspaces +type CurrentWorkspaceBindings struct { + // A list of workspace IDs. + Workspaces []types.Int64 `tfsdk:"workspaces"` +} + +// Data source format +type DataSourceFormat string + +const DataSourceFormatAvro DataSourceFormat = `AVRO` + +const DataSourceFormatBigqueryFormat DataSourceFormat = `BIGQUERY_FORMAT` + +const DataSourceFormatCsv DataSourceFormat = `CSV` + +const DataSourceFormatDatabricksFormat DataSourceFormat = `DATABRICKS_FORMAT` + +const DataSourceFormatDelta DataSourceFormat = `DELTA` + +const DataSourceFormatDeltasharing DataSourceFormat = `DELTASHARING` + +const DataSourceFormatHiveCustom DataSourceFormat = `HIVE_CUSTOM` + +const DataSourceFormatHiveSerde DataSourceFormat = `HIVE_SERDE` + +const DataSourceFormatJson DataSourceFormat = `JSON` + +const DataSourceFormatMysqlFormat DataSourceFormat = `MYSQL_FORMAT` + +const DataSourceFormatNetsuiteFormat DataSourceFormat = `NETSUITE_FORMAT` + +const DataSourceFormatOrc DataSourceFormat = `ORC` + +const DataSourceFormatParquet DataSourceFormat = `PARQUET` + +const DataSourceFormatPostgresqlFormat DataSourceFormat = `POSTGRESQL_FORMAT` + +const DataSourceFormatRedshiftFormat DataSourceFormat = `REDSHIFT_FORMAT` + +const DataSourceFormatSalesforceFormat DataSourceFormat = `SALESFORCE_FORMAT` + +const DataSourceFormatSnowflakeFormat DataSourceFormat = `SNOWFLAKE_FORMAT` + +const DataSourceFormatSqldwFormat DataSourceFormat = `SQLDW_FORMAT` + +const DataSourceFormatSqlserverFormat DataSourceFormat = `SQLSERVER_FORMAT` + +const DataSourceFormatText DataSourceFormat = `TEXT` + +const DataSourceFormatUnityCatalog DataSourceFormat = `UNITY_CATALOG` + +const DataSourceFormatVectorIndexFormat DataSourceFormat = `VECTOR_INDEX_FORMAT` + +const DataSourceFormatWorkdayRaasFormat DataSourceFormat = `WORKDAY_RAAS_FORMAT` + +// String representation for [fmt.Print] +func (f *DataSourceFormat) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataSourceFormat) Set(v string) error { + switch v { + case `AVRO`, `BIGQUERY_FORMAT`, `CSV`, `DATABRICKS_FORMAT`, `DELTA`, `DELTASHARING`, `HIVE_CUSTOM`, `HIVE_SERDE`, `JSON`, `MYSQL_FORMAT`, `NETSUITE_FORMAT`, `ORC`, `PARQUET`, `POSTGRESQL_FORMAT`, 
`REDSHIFT_FORMAT`, `SALESFORCE_FORMAT`, `SNOWFLAKE_FORMAT`, `SQLDW_FORMAT`, `SQLSERVER_FORMAT`, `TEXT`, `UNITY_CATALOG`, `VECTOR_INDEX_FORMAT`, `WORKDAY_RAAS_FORMAT`:
+		*f = DataSourceFormat(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "AVRO", "BIGQUERY_FORMAT", "CSV", "DATABRICKS_FORMAT", "DELTA", "DELTASHARING", "HIVE_CUSTOM", "HIVE_SERDE", "JSON", "MYSQL_FORMAT", "NETSUITE_FORMAT", "ORC", "PARQUET", "POSTGRESQL_FORMAT", "REDSHIFT_FORMAT", "SALESFORCE_FORMAT", "SNOWFLAKE_FORMAT", "SQLDW_FORMAT", "SQLSERVER_FORMAT", "TEXT", "UNITY_CATALOG", "VECTOR_INDEX_FORMAT", "WORKDAY_RAAS_FORMAT"`, v)
+	}
+}
+
+// Type always returns DataSourceFormat to satisfy [pflag.Value] interface
+func (f *DataSourceFormat) Type() string {
+	return "DataSourceFormat"
+}
+
+type DatabricksGcpServiceAccountRequest struct {
+}
+
+type DatabricksGcpServiceAccountResponse struct {
+	// The Databricks internal ID that represents this service account. This is
+	// an output-only field.
+	CredentialId types.String `tfsdk:"credential_id"`
+	// The email of the service account. This is an output-only field.
+	Email types.String `tfsdk:"email"`
+}
+
+// Delete a metastore assignment
+type DeleteAccountMetastoreAssignmentRequest struct {
+	// Unity Catalog metastore ID
+	MetastoreId types.String `tfsdk:"-" url:"-"`
+	// Workspace ID.
+	WorkspaceId types.Int64 `tfsdk:"-" url:"-"`
+}
+
+// Delete a metastore
+type DeleteAccountMetastoreRequest struct {
+	// Force deletion even if the metastore is not empty. Default is false.
+	Force types.Bool `tfsdk:"-" url:"force,omitempty"`
+	// Unity Catalog metastore ID
+	MetastoreId types.String `tfsdk:"-" url:"-"`
+}
+
+// Delete a storage credential
+type DeleteAccountStorageCredentialRequest struct {
+	// Force deletion even if the Storage Credential is not empty. Default is
+	// false.
+	Force types.Bool `tfsdk:"-" url:"force,omitempty"`
+	// Unity Catalog metastore ID
+	MetastoreId types.String `tfsdk:"-" url:"-"`
+	// Name of the storage credential.
+	StorageCredentialName types.String `tfsdk:"-" url:"-"`
+}
+
+// Delete a Registered Model Alias
+type DeleteAliasRequest struct {
+	// The name of the alias
+	Alias types.String `tfsdk:"-" url:"-"`
+	// The three-level (fully qualified) name of the registered model
+	FullName types.String `tfsdk:"-" url:"-"`
+}
+
+type DeleteAliasResponse struct {
+}
+
+// Delete a catalog
+type DeleteCatalogRequest struct {
+	// Force deletion even if the catalog is not empty.
+	Force types.Bool `tfsdk:"-" url:"force,omitempty"`
+	// The name of the catalog.
+	Name types.String `tfsdk:"-" url:"-"`
+}
+
+// Delete a connection
+type DeleteConnectionRequest struct {
+	// The name of the connection to be deleted.
+	Name types.String `tfsdk:"-" url:"-"`
+}
+
+// Delete an external location
+type DeleteExternalLocationRequest struct {
+	// Force deletion even if there are dependent external tables or mounts.
+	Force types.Bool `tfsdk:"-" url:"force,omitempty"`
+	// Name of the external location.
+	Name types.String `tfsdk:"-" url:"-"`
+}
+
+// Delete a function
+type DeleteFunctionRequest struct {
+	// Force deletion even if the function is not empty.
+	Force types.Bool `tfsdk:"-" url:"force,omitempty"`
+	// The fully-qualified name of the function (of the form
+	// __catalog_name__.__schema_name__.__function__name__).
+	Name types.String `tfsdk:"-" url:"-"`
+}
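+
+// Example (illustrative sketch only, not generator output): the delete request
+// structs above carry `tfsdk:"-"` tags because their fields are sent as URL
+// path or query parameters rather than stored in Terraform state. The function
+// name and force flag below are placeholder values.
+func exampleDeleteFunctionRequest() DeleteFunctionRequest {
+	return DeleteFunctionRequest{
+		Name:  types.StringValue("main.default.example_function"),
+		Force: types.BoolValue(true),
+	}
+}
+
+// Delete a metastore
+type DeleteMetastoreRequest struct {
+	// Force deletion even if the metastore is not empty. Default is false.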
+ Force types.Bool `tfsdk:"-" url:"force,omitempty"` + // Unique ID of the metastore. + Id types.String `tfsdk:"-" url:"-"` +} + +// Delete a Model Version +type DeleteModelVersionRequest struct { + // The three-level (fully qualified) name of the model version + FullName types.String `tfsdk:"-" url:"-"` + // The integer version number of the model version + Version types.Int64 `tfsdk:"-" url:"-"` +} + +// Delete an Online Table +type DeleteOnlineTableRequest struct { + // Full three-part (catalog, schema, table) name of the table. + Name types.String `tfsdk:"-" url:"-"` +} + +// Delete a table monitor +type DeleteQualityMonitorRequest struct { + // Full name of the table. + TableName types.String `tfsdk:"-" url:"-"` +} + +// Delete a Registered Model +type DeleteRegisteredModelRequest struct { + // The three-level (fully qualified) name of the registered model + FullName types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete a schema +type DeleteSchemaRequest struct { + // Full name of the schema. + FullName types.String `tfsdk:"-" url:"-"` +} + +// Delete a credential +type DeleteStorageCredentialRequest struct { + // Force deletion even if there are dependent external locations or external + // tables. + Force types.Bool `tfsdk:"-" url:"force,omitempty"` + // Name of the storage credential. + Name types.String `tfsdk:"-" url:"-"` +} + +// Delete a table constraint +type DeleteTableConstraintRequest struct { + // If true, try deleting all child constraints of the current constraint. If + // false, reject this operation if the current constraint has any child + // constraints. + Cascade types.Bool `tfsdk:"-" url:"cascade"` + // The name of the constraint to delete. + ConstraintName types.String `tfsdk:"-" url:"constraint_name"` + // Full name of the table referenced by the constraint. + FullName types.String `tfsdk:"-" url:"-"` +} + +// Delete a table +type DeleteTableRequest struct { + // Full name of the table. + FullName types.String `tfsdk:"-" url:"-"` +} + +// Delete a Volume +type DeleteVolumeRequest struct { + // The three-level (fully qualified) name of the volume + Name types.String `tfsdk:"-" url:"-"` +} + +// Properties pertaining to the current state of the delta table as given by the +// commit server. This does not contain **delta.*** (input) properties in +// __TableInfo.properties__. +type DeltaRuntimePropertiesKvPairs struct { + // A map of key-value properties attached to the securable. + DeltaRuntimeProperties map[string]types.String `tfsdk:"delta_runtime_properties"` +} + +// A dependency of a SQL object. Either the __table__ field or the __function__ +// field must be defined. +type Dependency struct { + // A function that is dependent on a SQL object. + Function *FunctionDependency `tfsdk:"function"` + // A table that is dependent on a SQL object. + Table *TableDependency `tfsdk:"table"` +} + +// A list of dependencies. +type DependencyList struct { + // Array of dependencies. + Dependencies []Dependency `tfsdk:"dependencies"` +} + +// Disable a system schema +type DisableRequest struct { + // The metastore ID under which the system schema lives. + MetastoreId types.String `tfsdk:"-" url:"-"` + // Full name of the system schema. 
+ SchemaName types.String `tfsdk:"-" url:"-"` +} + +type DisableResponse struct { +} + +type EffectivePermissionsList struct { + // The privileges conveyed to each principal (either directly or via + // inheritance) + PrivilegeAssignments []EffectivePrivilegeAssignment `tfsdk:"privilege_assignments"` +} + +type EffectivePredictiveOptimizationFlag struct { + // The name of the object from which the flag was inherited. If there was no + // inheritance, this field is left blank. + InheritedFromName types.String `tfsdk:"inherited_from_name"` + // The type of the object from which the flag was inherited. If there was no + // inheritance, this field is left blank. + InheritedFromType EffectivePredictiveOptimizationFlagInheritedFromType `tfsdk:"inherited_from_type"` + // Whether predictive optimization should be enabled for this object and + // objects under it. + Value EnablePredictiveOptimization `tfsdk:"value"` +} + +// The type of the object from which the flag was inherited. If there was no +// inheritance, this field is left blank. +type EffectivePredictiveOptimizationFlagInheritedFromType string + +const EffectivePredictiveOptimizationFlagInheritedFromTypeCatalog EffectivePredictiveOptimizationFlagInheritedFromType = `CATALOG` + +const EffectivePredictiveOptimizationFlagInheritedFromTypeSchema EffectivePredictiveOptimizationFlagInheritedFromType = `SCHEMA` + +// String representation for [fmt.Print] +func (f *EffectivePredictiveOptimizationFlagInheritedFromType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EffectivePredictiveOptimizationFlagInheritedFromType) Set(v string) error { + switch v { + case `CATALOG`, `SCHEMA`: + *f = EffectivePredictiveOptimizationFlagInheritedFromType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CATALOG", "SCHEMA"`, v) + } +} + +// Type always returns EffectivePredictiveOptimizationFlagInheritedFromType to satisfy [pflag.Value] interface +func (f *EffectivePredictiveOptimizationFlagInheritedFromType) Type() string { + return "EffectivePredictiveOptimizationFlagInheritedFromType" +} + +type EffectivePrivilege struct { + // The full name of the object that conveys this privilege via inheritance. + // This field is omitted when privilege is not inherited (it's assigned to + // the securable itself). + InheritedFromName types.String `tfsdk:"inherited_from_name"` + // The type of the object that conveys this privilege via inheritance. This + // field is omitted when privilege is not inherited (it's assigned to the + // securable itself). + InheritedFromType SecurableType `tfsdk:"inherited_from_type"` + // The privilege assigned to the principal. + Privilege Privilege `tfsdk:"privilege"` +} + +type EffectivePrivilegeAssignment struct { + // The principal (user email address or group name). + Principal types.String `tfsdk:"principal"` + // The privileges conveyed to the principal (either directly or via + // inheritance). + Privileges []EffectivePrivilege `tfsdk:"privileges"` +} + +// Whether predictive optimization should be enabled for this object and objects +// under it. 
+type EnablePredictiveOptimization string
+
+const EnablePredictiveOptimizationDisable EnablePredictiveOptimization = `DISABLE`
+
+const EnablePredictiveOptimizationEnable EnablePredictiveOptimization = `ENABLE`
+
+const EnablePredictiveOptimizationInherit EnablePredictiveOptimization = `INHERIT`
+
+// String representation for [fmt.Print]
+func (f *EnablePredictiveOptimization) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *EnablePredictiveOptimization) Set(v string) error {
+	switch v {
+	case `DISABLE`, `ENABLE`, `INHERIT`:
+		*f = EnablePredictiveOptimization(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "DISABLE", "ENABLE", "INHERIT"`, v)
+	}
+}
+
+// Type always returns EnablePredictiveOptimization to satisfy [pflag.Value] interface
+func (f *EnablePredictiveOptimization) Type() string {
+	return "EnablePredictiveOptimization"
+}
+
+// Enable a system schema
+type EnableRequest struct {
+	// The metastore ID under which the system schema lives.
+	MetastoreId types.String `tfsdk:"-" url:"-"`
+	// Full name of the system schema.
+	SchemaName types.String `tfsdk:"-" url:"-"`
+}
+
+type EnableResponse struct {
+}
+
+// Encryption options that apply to clients connecting to cloud storage.
+type EncryptionDetails struct {
+	// Server-Side Encryption properties for clients communicating with AWS s3.
+	SseEncryptionDetails *SseEncryptionDetails `tfsdk:"sse_encryption_details"`
+}
+
+// Get boolean reflecting if table exists
+type ExistsRequest struct {
+	// Full name of the table.
+	FullName types.String `tfsdk:"-" url:"-"`
+}
+
+type ExternalLocationInfo struct {
+	// The AWS access point to use when accessing s3 for this external location.
+	AccessPoint types.String `tfsdk:"access_point"`
+	// Indicates whether the principal is limited to retrieving metadata for the
+	// associated object through the BROWSE privilege when include_browse is
+	// enabled in the request.
+	BrowseOnly types.Bool `tfsdk:"browse_only"`
+	// User-provided free-form text description.
+	Comment types.String `tfsdk:"comment"`
+	// Time at which this external location was created, in epoch milliseconds.
+	CreatedAt types.Int64 `tfsdk:"created_at"`
+	// Username of external location creator.
+	CreatedBy types.String `tfsdk:"created_by"`
+	// Unique ID of the location's storage credential.
+	CredentialId types.String `tfsdk:"credential_id"`
+	// Name of the storage credential used with this location.
+	CredentialName types.String `tfsdk:"credential_name"`
+	// Encryption options that apply to clients connecting to cloud storage.
+	EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details"`
+	// Whether the current securable is accessible from all workspaces or a
+	// specific set of workspaces.
+	IsolationMode IsolationMode `tfsdk:"isolation_mode"`
+	// Unique identifier of metastore hosting the external location.
+	MetastoreId types.String `tfsdk:"metastore_id"`
+	// Name of the external location.
+	Name types.String `tfsdk:"name"`
+	// The owner of the external location.
+	Owner types.String `tfsdk:"owner"`
+	// Indicates whether the external location is read-only.
+	ReadOnly types.Bool `tfsdk:"read_only"`
+	// Time at which this external location was last modified, in epoch
+	// milliseconds.
+	UpdatedAt types.Int64 `tfsdk:"updated_at"`
+	// Username of user who last modified the external location.
+	UpdatedBy types.String `tfsdk:"updated_by"`
+	// Path URL of the external location.
+ Url types.String `tfsdk:"url"` +} + +// Detailed status of an online table. Shown if the online table is in the +// OFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state. +type FailedStatus struct { + // The last source table Delta version that was synced to the online table. + // Note that this Delta version may only be partially synced to the online + // table. Only populated if the table is still online and available for + // serving. + LastProcessedCommitVersion types.Int64 `tfsdk:"last_processed_commit_version"` + // The timestamp of the last time any data was synchronized from the source + // table to the online table. Only populated if the table is still online + // and available for serving. + Timestamp types.String `tfsdk:"timestamp"` +} + +type ForeignKeyConstraint struct { + // Column names for this constraint. + ChildColumns []types.String `tfsdk:"child_columns"` + // The name of the constraint. + Name types.String `tfsdk:"name"` + // Column names for this constraint. + ParentColumns []types.String `tfsdk:"parent_columns"` + // The full name of the parent constraint. + ParentTable types.String `tfsdk:"parent_table"` +} + +// A function that is dependent on a SQL object. +type FunctionDependency struct { + // Full name of the dependent function, in the form of + // __catalog_name__.__schema_name__.__function_name__. + FunctionFullName types.String `tfsdk:"function_full_name"` +} + +type FunctionInfo struct { + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. + BrowseOnly types.Bool `tfsdk:"browse_only"` + // Name of parent catalog. + CatalogName types.String `tfsdk:"catalog_name"` + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Time at which this function was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of function creator. + CreatedBy types.String `tfsdk:"created_by"` + // Scalar function return data type. + DataType ColumnTypeName `tfsdk:"data_type"` + // External function language. + ExternalLanguage types.String `tfsdk:"external_language"` + // External function name. + ExternalName types.String `tfsdk:"external_name"` + // Pretty printed function data type. + FullDataType types.String `tfsdk:"full_data_type"` + // Full name of function, in form of + // __catalog_name__.__schema_name__.__function__name__ + FullName types.String `tfsdk:"full_name"` + // Id of Function, relative to parent schema. + FunctionId types.String `tfsdk:"function_id"` + + InputParams *FunctionParameterInfos `tfsdk:"input_params"` + // Whether the function is deterministic. + IsDeterministic types.Bool `tfsdk:"is_deterministic"` + // Function null call. + IsNullCall types.Bool `tfsdk:"is_null_call"` + // Unique identifier of parent metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // Name of function, relative to parent schema. + Name types.String `tfsdk:"name"` + // Username of current owner of function. + Owner types.String `tfsdk:"owner"` + // Function parameter style. **S** is the value for SQL. + ParameterStyle FunctionInfoParameterStyle `tfsdk:"parameter_style"` + // JSON-serialized key-value pair map, encoded (escaped) as a string. + Properties types.String `tfsdk:"properties"` + // Table function return parameters. + ReturnParams *FunctionParameterInfos `tfsdk:"return_params"` + // Function language. 
When **EXTERNAL** is used, the language of the routine
+	// function should be specified in the __external_language__ field, and the
+	// __return_params__ of the function cannot be used (as **TABLE** return
+	// type is not supported), and the __sql_data_access__ field must be
+	// **NO_SQL**.
+	RoutineBody FunctionInfoRoutineBody `tfsdk:"routine_body"`
+	// Function body.
+	RoutineDefinition types.String `tfsdk:"routine_definition"`
+	// Function dependencies.
+	RoutineDependencies *DependencyList `tfsdk:"routine_dependencies"`
+	// Name of parent schema relative to its parent catalog.
+	SchemaName types.String `tfsdk:"schema_name"`
+	// Function security type.
+	SecurityType FunctionInfoSecurityType `tfsdk:"security_type"`
+	// Specific name of the function; Reserved for future use.
+	SpecificName types.String `tfsdk:"specific_name"`
+	// Function SQL data access.
+	SqlDataAccess FunctionInfoSqlDataAccess `tfsdk:"sql_data_access"`
+	// List of schemes whose objects can be referenced without qualification.
+	SqlPath types.String `tfsdk:"sql_path"`
+	// Time at which this function was last modified, in epoch milliseconds.
+	UpdatedAt types.Int64 `tfsdk:"updated_at"`
+	// Username of user who last modified function.
+	UpdatedBy types.String `tfsdk:"updated_by"`
+}
+
+// Function parameter style. **S** is the value for SQL.
+type FunctionInfoParameterStyle string
+
+const FunctionInfoParameterStyleS FunctionInfoParameterStyle = `S`
+
+// String representation for [fmt.Print]
+func (f *FunctionInfoParameterStyle) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *FunctionInfoParameterStyle) Set(v string) error {
+	switch v {
+	case `S`:
+		*f = FunctionInfoParameterStyle(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "S"`, v)
+	}
+}
+
+// Type always returns FunctionInfoParameterStyle to satisfy [pflag.Value] interface
+func (f *FunctionInfoParameterStyle) Type() string {
+	return "FunctionInfoParameterStyle"
+}
+
+// Function language. When **EXTERNAL** is used, the language of the routine
+// function should be specified in the __external_language__ field, and the
+// __return_params__ of the function cannot be used (as **TABLE** return type is
+// not supported), and the __sql_data_access__ field must be **NO_SQL**.
+type FunctionInfoRoutineBody string
+
+const FunctionInfoRoutineBodyExternal FunctionInfoRoutineBody = `EXTERNAL`
+
+const FunctionInfoRoutineBodySql FunctionInfoRoutineBody = `SQL`
+
+// String representation for [fmt.Print]
+func (f *FunctionInfoRoutineBody) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *FunctionInfoRoutineBody) Set(v string) error {
+	switch v {
+	case `EXTERNAL`, `SQL`:
+		*f = FunctionInfoRoutineBody(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "EXTERNAL", "SQL"`, v)
+	}
+}
+
+// Type always returns FunctionInfoRoutineBody to satisfy [pflag.Value] interface
+func (f *FunctionInfoRoutineBody) Type() string {
+	return "FunctionInfoRoutineBody"
+}
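+
+// Example (illustrative sketch only, not generator output): the generated enum
+// types validate raw strings through their Set method, following the
+// [pflag.Value] contract implemented above. The helper below is illustrative
+// and not part of the generated API.
+func exampleParseRoutineBody(raw string) (FunctionInfoRoutineBody, error) {
+	var body FunctionInfoRoutineBody
+	if err := body.Set(raw); err != nil {
+		// raw was not one of `EXTERNAL` or `SQL`.
+		return "", err
+	}
+	return body, nil
+}
+
+// Function security type.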
+type FunctionInfoSecurityType string + +const FunctionInfoSecurityTypeDefiner FunctionInfoSecurityType = `DEFINER` + +// String representation for [fmt.Print] +func (f *FunctionInfoSecurityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionInfoSecurityType) Set(v string) error { + switch v { + case `DEFINER`: + *f = FunctionInfoSecurityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEFINER"`, v) + } +} + +// Type always returns FunctionInfoSecurityType to satisfy [pflag.Value] interface +func (f *FunctionInfoSecurityType) Type() string { + return "FunctionInfoSecurityType" +} + +// Function SQL data access. +type FunctionInfoSqlDataAccess string + +const FunctionInfoSqlDataAccessContainsSql FunctionInfoSqlDataAccess = `CONTAINS_SQL` + +const FunctionInfoSqlDataAccessNoSql FunctionInfoSqlDataAccess = `NO_SQL` + +const FunctionInfoSqlDataAccessReadsSqlData FunctionInfoSqlDataAccess = `READS_SQL_DATA` + +// String representation for [fmt.Print] +func (f *FunctionInfoSqlDataAccess) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionInfoSqlDataAccess) Set(v string) error { + switch v { + case `CONTAINS_SQL`, `NO_SQL`, `READS_SQL_DATA`: + *f = FunctionInfoSqlDataAccess(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CONTAINS_SQL", "NO_SQL", "READS_SQL_DATA"`, v) + } +} + +// Type always returns FunctionInfoSqlDataAccess to satisfy [pflag.Value] interface +func (f *FunctionInfoSqlDataAccess) Type() string { + return "FunctionInfoSqlDataAccess" +} + +type FunctionParameterInfo struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Name of parameter. + Name types.String `tfsdk:"name"` + // Default value of the parameter. + ParameterDefault types.String `tfsdk:"parameter_default"` + // The mode of the function parameter. + ParameterMode FunctionParameterMode `tfsdk:"parameter_mode"` + // The type of function parameter. + ParameterType FunctionParameterType `tfsdk:"parameter_type"` + // Ordinal position of column (starting at position 0). + Position types.Int64 `tfsdk:"position"` + // Format of IntervalType. + TypeIntervalType types.String `tfsdk:"type_interval_type"` + // Full data type spec, JSON-serialized. + TypeJson types.String `tfsdk:"type_json"` + // Name of type (INT, STRUCT, MAP, etc.). + TypeName ColumnTypeName `tfsdk:"type_name"` + // Digits of precision; required on Create for DecimalTypes. + TypePrecision types.Int64 `tfsdk:"type_precision"` + // Digits to right of decimal; Required on Create for DecimalTypes. + TypeScale types.Int64 `tfsdk:"type_scale"` + // Full data type spec, SQL/catalogString text. + TypeText types.String `tfsdk:"type_text"` +} + +type FunctionParameterInfos struct { + // The array of __FunctionParameterInfo__ definitions of the function's + // parameters. + Parameters []FunctionParameterInfo `tfsdk:"parameters"` +} + +// The mode of the function parameter. 
+type FunctionParameterMode string + +const FunctionParameterModeIn FunctionParameterMode = `IN` + +// String representation for [fmt.Print] +func (f *FunctionParameterMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionParameterMode) Set(v string) error { + switch v { + case `IN`: + *f = FunctionParameterMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "IN"`, v) + } +} + +// Type always returns FunctionParameterMode to satisfy [pflag.Value] interface +func (f *FunctionParameterMode) Type() string { + return "FunctionParameterMode" +} + +// The type of function parameter. +type FunctionParameterType string + +const FunctionParameterTypeColumn FunctionParameterType = `COLUMN` + +const FunctionParameterTypeParam FunctionParameterType = `PARAM` + +// String representation for [fmt.Print] +func (f *FunctionParameterType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionParameterType) Set(v string) error { + switch v { + case `COLUMN`, `PARAM`: + *f = FunctionParameterType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COLUMN", "PARAM"`, v) + } +} + +// Type always returns FunctionParameterType to satisfy [pflag.Value] interface +func (f *FunctionParameterType) Type() string { + return "FunctionParameterType" +} + +// Gets the metastore assignment for a workspace +type GetAccountMetastoreAssignmentRequest struct { + // Workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +// Get a metastore +type GetAccountMetastoreRequest struct { + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` +} + +// Gets the named storage credential +type GetAccountStorageCredentialRequest struct { + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` + // Name of the storage credential. + StorageCredentialName types.String `tfsdk:"-" url:"-"` +} + +// Get an artifact allowlist +type GetArtifactAllowlistRequest struct { + // The artifact type of the allowlist. + ArtifactType ArtifactType `tfsdk:"-" url:"-"` +} + +// Get securable workspace bindings +type GetBindingsRequest struct { + // The name of the securable. + SecurableName types.String `tfsdk:"-" url:"-"` + // The type of the securable. + SecurableType types.String `tfsdk:"-" url:"-"` +} + +// Get Model Version By Alias +type GetByAliasRequest struct { + // The name of the alias + Alias types.String `tfsdk:"-" url:"-"` + // The three-level (fully qualified) name of the registered model + FullName types.String `tfsdk:"-" url:"-"` +} + +// Get a catalog +type GetCatalogRequest struct { + // Whether to include catalogs in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // The name of the catalog. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get a connection +type GetConnectionRequest struct { + // Name of the connection. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get effective permissions +type GetEffectiveRequest struct { + // Full name of securable. + FullName types.String `tfsdk:"-" url:"-"` + // If provided, only the effective permissions for the specified principal + // (user or group) are returned. + Principal types.String `tfsdk:"-" url:"principal,omitempty"` + // Type of securable. 
+ SecurableType SecurableType `tfsdk:"-" url:"-"` +} + +// Get an external location +type GetExternalLocationRequest struct { + // Whether to include external locations in the response for which the + // principal can only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Name of the external location. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get a function +type GetFunctionRequest struct { + // Whether to include functions in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // The fully-qualified name of the function (of the form + // __catalog_name__.__schema_name__.__function__name__). + Name types.String `tfsdk:"-" url:"-"` +} + +// Get permissions +type GetGrantRequest struct { + // Full name of securable. + FullName types.String `tfsdk:"-" url:"-"` + // If provided, only the permissions for the specified principal (user or + // group) are returned. + Principal types.String `tfsdk:"-" url:"principal,omitempty"` + // Type of securable. + SecurableType SecurableType `tfsdk:"-" url:"-"` +} + +// Get a metastore +type GetMetastoreRequest struct { + // Unique ID of the metastore. + Id types.String `tfsdk:"-" url:"-"` +} + +type GetMetastoreSummaryResponse struct { + // Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`). + Cloud types.String `tfsdk:"cloud"` + // Time at which this metastore was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of metastore creator. + CreatedBy types.String `tfsdk:"created_by"` + // Unique identifier of the metastore's (Default) Data Access Configuration. + DefaultDataAccessConfigId types.String `tfsdk:"default_data_access_config_id"` + // The organization name of a Delta Sharing entity, to be used in + // Databricks-to-Databricks Delta Sharing as the official name. + DeltaSharingOrganizationName types.String `tfsdk:"delta_sharing_organization_name"` + // The lifetime of delta sharing recipient token in seconds. + DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds"` + // The scope of Delta Sharing enabled for the metastore. + DeltaSharingScope GetMetastoreSummaryResponseDeltaSharingScope `tfsdk:"delta_sharing_scope"` + // Globally unique metastore ID across clouds and regions, of the form + // `cloud:region:metastore_id`. + GlobalMetastoreId types.String `tfsdk:"global_metastore_id"` + // Unique identifier of metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // The user-specified name of the metastore. + Name types.String `tfsdk:"name"` + // The owner of the metastore. + Owner types.String `tfsdk:"owner"` + // Privilege model version of the metastore, of the form `major.minor` + // (e.g., `1.0`). + PrivilegeModelVersion types.String `tfsdk:"privilege_model_version"` + // Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). + Region types.String `tfsdk:"region"` + // The storage root URL for metastore + StorageRoot types.String `tfsdk:"storage_root"` + // UUID of storage credential to access the metastore storage_root. + StorageRootCredentialId types.String `tfsdk:"storage_root_credential_id"` + // Name of the storage credential to access the metastore storage_root. + StorageRootCredentialName types.String `tfsdk:"storage_root_credential_name"` + // Time at which the metastore was last modified, in epoch milliseconds. 
+ UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of user who last modified the metastore. + UpdatedBy types.String `tfsdk:"updated_by"` +} + +// The scope of Delta Sharing enabled for the metastore. +type GetMetastoreSummaryResponseDeltaSharingScope string + +const GetMetastoreSummaryResponseDeltaSharingScopeInternal GetMetastoreSummaryResponseDeltaSharingScope = `INTERNAL` + +const GetMetastoreSummaryResponseDeltaSharingScopeInternalAndExternal GetMetastoreSummaryResponseDeltaSharingScope = `INTERNAL_AND_EXTERNAL` + +// String representation for [fmt.Print] +func (f *GetMetastoreSummaryResponseDeltaSharingScope) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetMetastoreSummaryResponseDeltaSharingScope) Set(v string) error { + switch v { + case `INTERNAL`, `INTERNAL_AND_EXTERNAL`: + *f = GetMetastoreSummaryResponseDeltaSharingScope(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INTERNAL", "INTERNAL_AND_EXTERNAL"`, v) + } +} + +// Type always returns GetMetastoreSummaryResponseDeltaSharingScope to satisfy [pflag.Value] interface +func (f *GetMetastoreSummaryResponseDeltaSharingScope) Type() string { + return "GetMetastoreSummaryResponseDeltaSharingScope" +} + +// Get a Model Version +type GetModelVersionRequest struct { + // The three-level (fully qualified) name of the model version + FullName types.String `tfsdk:"-" url:"-"` + // Whether to include model versions in the response for which the principal + // can only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // The integer version number of the model version + Version types.Int64 `tfsdk:"-" url:"-"` +} + +// Get an Online Table +type GetOnlineTableRequest struct { + // Full three-part (catalog, schema, table) name of the table. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get a table monitor +type GetQualityMonitorRequest struct { + // Full name of the table. + TableName types.String `tfsdk:"-" url:"-"` +} + +// Get refresh +type GetRefreshRequest struct { + // ID of the refresh. + RefreshId types.String `tfsdk:"-" url:"-"` + // Full name of the table. + TableName types.String `tfsdk:"-" url:"-"` +} + +// Get a Registered Model +type GetRegisteredModelRequest struct { + // The three-level (fully qualified) name of the registered model + FullName types.String `tfsdk:"-" url:"-"` + // Whether to include registered models in the response for which the + // principal can only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` +} + +// Get a schema +type GetSchemaRequest struct { + // Full name of the schema. + FullName types.String `tfsdk:"-" url:"-"` + // Whether to include schemas in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` +} + +// Get a credential +type GetStorageCredentialRequest struct { + // Name of the storage credential. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get a table +type GetTableRequest struct { + // Full name of the table. + FullName types.String `tfsdk:"-" url:"-"` + // Whether to include tables in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Whether delta metadata should be included in the response. 
+ IncludeDeltaMetadata types.Bool `tfsdk:"-" url:"include_delta_metadata,omitempty"` +} + +// Get catalog workspace bindings +type GetWorkspaceBindingRequest struct { + // The name of the catalog. + Name types.String `tfsdk:"-" url:"-"` +} + +// Whether the current securable is accessible from all workspaces or a specific +// set of workspaces. +type IsolationMode string + +const IsolationModeIsolationModeIsolated IsolationMode = `ISOLATION_MODE_ISOLATED` + +const IsolationModeIsolationModeOpen IsolationMode = `ISOLATION_MODE_OPEN` + +// String representation for [fmt.Print] +func (f *IsolationMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *IsolationMode) Set(v string) error { + switch v { + case `ISOLATION_MODE_ISOLATED`, `ISOLATION_MODE_OPEN`: + *f = IsolationMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ISOLATION_MODE_ISOLATED", "ISOLATION_MODE_OPEN"`, v) + } +} + +// Type always returns IsolationMode to satisfy [pflag.Value] interface +func (f *IsolationMode) Type() string { + return "IsolationMode" +} + +// Get all workspaces assigned to a metastore +type ListAccountMetastoreAssignmentsRequest struct { + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` +} + +// The list of workspaces to which the given metastore is assigned. +type ListAccountMetastoreAssignmentsResponse struct { + WorkspaceIds []types.Int64 `tfsdk:"workspace_ids"` +} + +// Get all storage credentials assigned to a metastore +type ListAccountStorageCredentialsRequest struct { + // Unity Catalog metastore ID + MetastoreId types.String `tfsdk:"-" url:"-"` +} + +type ListAccountStorageCredentialsResponse struct { + // An array of metastore storage credentials. + StorageCredentials []StorageCredentialInfo `tfsdk:"storage_credentials"` +} + +// List catalogs +type ListCatalogsRequest struct { + // Whether to include catalogs in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Maximum number of catalogs to return. - when set to 0, the page length is + // set to a server configured value (recommended); - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to a value less than 0, an invalid parameter + // error is returned; - If not set, all valid catalogs are returned (not + // recommended). - Note: The number of returned catalogs might be less than + // the specified max_results size, even zero. The only definitive indication + // that no further catalogs can be fetched is when the next_page_token is + // unset from the response. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListCatalogsResponse struct { + // An array of catalog information objects. + Catalogs []CatalogInfo `tfsdk:"catalogs"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List connections +type ListConnectionsRequest struct { + // Maximum number of connections to return. - If not set, all connections + // are returned (not recommended). 
- when set to a value greater than 0, the + // page length is the minimum of this value and a server configured value; - + // when set to 0, the page length is set to a server configured value + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListConnectionsResponse struct { + // An array of connection information objects. + Connections []ConnectionInfo `tfsdk:"connections"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List external locations +type ListExternalLocationsRequest struct { + // Whether to include external locations in the response for which the + // principal can only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Maximum number of external locations to return. If not set, all the + // external locations are returned (not recommended). - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to 0, the page length is set to a server + // configured value (recommended); - when set to a value less than 0, an + // invalid parameter error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListExternalLocationsResponse struct { + // An array of external locations. + ExternalLocations []ExternalLocationInfo `tfsdk:"external_locations"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List functions +type ListFunctionsRequest struct { + // Name of parent catalog for functions of interest. + CatalogName types.String `tfsdk:"-" url:"catalog_name"` + // Whether to include functions in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Maximum number of functions to return. If not set, all the functions are + // returned (not recommended). - when set to a value greater than 0, the + // page length is the minimum of this value and a server configured value; - + // when set to 0, the page length is set to a server configured value + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // Parent schema of functions. + SchemaName types.String `tfsdk:"-" url:"schema_name"` +} + +type ListFunctionsResponse struct { + // An array of function information objects. + Functions []FunctionInfo `tfsdk:"functions"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. 
__page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` +} + +type ListMetastoresResponse struct { + // An array of metastore information objects. + Metastores []MetastoreInfo `tfsdk:"metastores"` +} + +// List Model Versions +type ListModelVersionsRequest struct { + // The full three-level name of the registered model under which to list + // model versions + FullName types.String `tfsdk:"-" url:"-"` + // Whether to include model versions in the response for which the principal + // can only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Maximum number of model versions to return. If not set, the page length + // is set to a server configured value (100, as of 1/3/2024). - when set to + // a value greater than 0, the page length is the minimum of this value and + // a server configured value(1000, as of 1/3/2024); - when set to 0, the + // page length is set to a server configured value (100, as of 1/3/2024) + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListModelVersionsResponse struct { + ModelVersions []ModelVersionInfo `tfsdk:"model_versions"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List refreshes +type ListRefreshesRequest struct { + // Full name of the table. + TableName types.String `tfsdk:"-" url:"-"` +} + +// List Registered Models +type ListRegisteredModelsRequest struct { + // The identifier of the catalog under which to list registered models. If + // specified, schema_name must be specified. + CatalogName types.String `tfsdk:"-" url:"catalog_name,omitempty"` + // Whether to include registered models in the response for which the + // principal can only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Max number of registered models to return. + // + // If both catalog and schema are specified: - when max_results is not + // specified, the page length is set to a server configured value (10000, as + // of 4/2/2024). - when set to a value greater than 0, the page length is + // the minimum of this value and a server configured value (10000, as of + // 4/2/2024); - when set to 0, the page length is set to a server configured + // value (10000, as of 4/2/2024); - when set to a value less than 0, an + // invalid parameter error is returned; + // + // If neither schema nor catalog is specified: - when max_results is not + // specified, the page length is set to a server configured value (100, as + // of 4/2/2024). - when set to a value greater than 0, the page length is + // the minimum of this value and a server configured value (1000, as of + // 4/2/2024); - when set to 0, the page length is set to a server configured + // value (100, as of 4/2/2024); - when set to a value less than 0, an + // invalid parameter error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque token to send for the next page of results (pagination). 
+ PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // The identifier of the schema under which to list registered models. If + // specified, catalog_name must be specified. + SchemaName types.String `tfsdk:"-" url:"schema_name,omitempty"` +} + +type ListRegisteredModelsResponse struct { + // Opaque token for pagination. Omitted if there are no more results. + // page_token should be set to this value for fetching the next page. + NextPageToken types.String `tfsdk:"next_page_token"` + + RegisteredModels []RegisteredModelInfo `tfsdk:"registered_models"` +} + +// List schemas +type ListSchemasRequest struct { + // Parent catalog for schemas of interest. + CatalogName types.String `tfsdk:"-" url:"catalog_name"` + // Whether to include schemas in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Maximum number of schemas to return. If not set, all the schemas are + // returned (not recommended). - when set to a value greater than 0, the + // page length is the minimum of this value and a server configured value; - + // when set to 0, the page length is set to a server configured value + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListSchemasResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` + // An array of schema information objects. + Schemas []SchemaInfo `tfsdk:"schemas"` +} + +// List credentials +type ListStorageCredentialsRequest struct { + // Maximum number of storage credentials to return. If not set, all the + // storage credentials are returned (not recommended). - when set to a value + // greater than 0, the page length is the minimum of this value and a server + // configured value; - when set to 0, the page length is set to a server + // configured value (recommended); - when set to a value less than 0, an + // invalid parameter error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListStorageCredentialsResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` + + StorageCredentials []StorageCredentialInfo `tfsdk:"storage_credentials"` +} + +// List table summaries +type ListSummariesRequest struct { + // Name of parent catalog for tables of interest. + CatalogName types.String `tfsdk:"-" url:"catalog_name"` + // Maximum number of summaries for tables to return. If not set, the page + // length is set to a server configured value (10000, as of 1/5/2024). 
- + // when set to a value greater than 0, the page length is the minimum of + // this value and a server configured value (10000, as of 1/5/2024); - when + // set to 0, the page length is set to a server configured value (10000, as + // of 1/5/2024) (recommended); - when set to a value less than 0, an invalid + // parameter error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // A sql LIKE pattern (% and _) for schema names. All schemas will be + // returned if not set or empty. + SchemaNamePattern types.String `tfsdk:"-" url:"schema_name_pattern,omitempty"` + // A sql LIKE pattern (% and _) for table names. All tables will be returned + // if not set or empty. + TableNamePattern types.String `tfsdk:"-" url:"table_name_pattern,omitempty"` +} + +// List system schemas +type ListSystemSchemasRequest struct { + // The ID for the metastore in which the system schema resides. + MetastoreId types.String `tfsdk:"-" url:"-"` +} + +type ListSystemSchemasResponse struct { + // An array of system schema information objects. + Schemas []SystemSchemaInfo `tfsdk:"schemas"` +} + +type ListTableSummariesResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` + // List of table summaries. + Tables []TableSummary `tfsdk:"tables"` +} + +// List tables +type ListTablesRequest struct { + // Name of parent catalog for tables of interest. + CatalogName types.String `tfsdk:"-" url:"catalog_name"` + // Whether to include tables in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Whether delta metadata should be included in the response. + IncludeDeltaMetadata types.Bool `tfsdk:"-" url:"include_delta_metadata,omitempty"` + // Maximum number of tables to return. If not set, all the tables are + // returned (not recommended). - when set to a value greater than 0, the + // page length is the minimum of this value and a server configured value; - + // when set to 0, the page length is set to a server configured value + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Whether to omit the columns of the table from the response or not. + OmitColumns types.Bool `tfsdk:"-" url:"omit_columns,omitempty"` + // Whether to omit the properties of the table from the response or not. + OmitProperties types.Bool `tfsdk:"-" url:"omit_properties,omitempty"` + // Opaque token to send for the next page of results (pagination). + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // Parent schema of tables. + SchemaName types.String `tfsdk:"-" url:"schema_name"` +} + +type ListTablesResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` + // An array of table information objects. 
+ Tables []TableInfo `tfsdk:"tables"` +} + +// List Volumes +type ListVolumesRequest struct { + // The identifier of the catalog + CatalogName types.String `tfsdk:"-" url:"catalog_name"` + // Whether to include volumes in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // Maximum number of volumes to return (page length). + // + // If not set, the page length is set to a server configured value (10000, + // as of 1/29/2024). - when set to a value greater than 0, the page length + // is the minimum of this value and a server configured value (10000, as of + // 1/29/2024); - when set to 0, the page length is set to a server + // configured value (10000, as of 1/29/2024) (recommended); - when set to a + // value less than 0, an invalid parameter error is returned; + // + // Note: this parameter controls only the maximum number of volumes to + // return. The actual number of volumes returned in a page may be smaller + // than this value, including 0, even if there are more pages. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque token returned by a previous request. It must be included in the + // request to retrieve the next page of results (pagination). + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // The identifier of the schema + SchemaName types.String `tfsdk:"-" url:"schema_name"` +} + +type ListVolumesResponseContent struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request to retrieve the next page of results. + NextPageToken types.String `tfsdk:"next_page_token"` + + Volumes []VolumeInfo `tfsdk:"volumes"` +} + +// The artifact pattern matching type +type MatchType string + +const MatchTypePrefixMatch MatchType = `PREFIX_MATCH` + +// String representation for [fmt.Print] +func (f *MatchType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MatchType) Set(v string) error { + switch v { + case `PREFIX_MATCH`: + *f = MatchType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PREFIX_MATCH"`, v) + } +} + +// Type always returns MatchType to satisfy [pflag.Value] interface +func (f *MatchType) Type() string { + return "MatchType" +} + +type MetastoreAssignment struct { + // The name of the default catalog in the metastore. + DefaultCatalogName types.String `tfsdk:"default_catalog_name"` + // The unique ID of the metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // The unique ID of the Databricks workspace. + WorkspaceId types.Int64 `tfsdk:"workspace_id"` +} + +type MetastoreInfo struct { + // Cloud vendor of the metastore home shard (e.g., `aws`, `azure`, `gcp`). + Cloud types.String `tfsdk:"cloud"` + // Time at which this metastore was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of metastore creator. + CreatedBy types.String `tfsdk:"created_by"` + // Unique identifier of the metastore's (Default) Data Access Configuration. + DefaultDataAccessConfigId types.String `tfsdk:"default_data_access_config_id"` + // The organization name of a Delta Sharing entity, to be used in + // Databricks-to-Databricks Delta Sharing as the official name. 
+ DeltaSharingOrganizationName types.String `tfsdk:"delta_sharing_organization_name"` + // The lifetime of delta sharing recipient token in seconds. + DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds"` + // The scope of Delta Sharing enabled for the metastore. + DeltaSharingScope MetastoreInfoDeltaSharingScope `tfsdk:"delta_sharing_scope"` + // Globally unique metastore ID across clouds and regions, of the form + // `cloud:region:metastore_id`. + GlobalMetastoreId types.String `tfsdk:"global_metastore_id"` + // Unique identifier of metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // The user-specified name of the metastore. + Name types.String `tfsdk:"name"` + // The owner of the metastore. + Owner types.String `tfsdk:"owner"` + // Privilege model version of the metastore, of the form `major.minor` + // (e.g., `1.0`). + PrivilegeModelVersion types.String `tfsdk:"privilege_model_version"` + // Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). + Region types.String `tfsdk:"region"` + // The storage root URL for metastore + StorageRoot types.String `tfsdk:"storage_root"` + // UUID of storage credential to access the metastore storage_root. + StorageRootCredentialId types.String `tfsdk:"storage_root_credential_id"` + // Name of the storage credential to access the metastore storage_root. + StorageRootCredentialName types.String `tfsdk:"storage_root_credential_name"` + // Time at which the metastore was last modified, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of user who last modified the metastore. + UpdatedBy types.String `tfsdk:"updated_by"` +} + +// The scope of Delta Sharing enabled for the metastore. +type MetastoreInfoDeltaSharingScope string + +const MetastoreInfoDeltaSharingScopeInternal MetastoreInfoDeltaSharingScope = `INTERNAL` + +const MetastoreInfoDeltaSharingScopeInternalAndExternal MetastoreInfoDeltaSharingScope = `INTERNAL_AND_EXTERNAL` + +// String representation for [fmt.Print] +func (f *MetastoreInfoDeltaSharingScope) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MetastoreInfoDeltaSharingScope) Set(v string) error { + switch v { + case `INTERNAL`, `INTERNAL_AND_EXTERNAL`: + *f = MetastoreInfoDeltaSharingScope(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INTERNAL", "INTERNAL_AND_EXTERNAL"`, v) + } +} + +// Type always returns MetastoreInfoDeltaSharingScope to satisfy [pflag.Value] interface +func (f *MetastoreInfoDeltaSharingScope) Type() string { + return "MetastoreInfoDeltaSharingScope" +} + +type ModelVersionInfo struct { + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. 
+ BrowseOnly types.Bool `tfsdk:"browse_only"` + // The name of the catalog containing the model version + CatalogName types.String `tfsdk:"catalog_name"` + // The comment attached to the model version + Comment types.String `tfsdk:"comment"` + + CreatedAt types.Int64 `tfsdk:"created_at"` + // The identifier of the user who created the model version + CreatedBy types.String `tfsdk:"created_by"` + // The unique identifier of the model version + Id types.String `tfsdk:"id"` + // The unique identifier of the metastore containing the model version + MetastoreId types.String `tfsdk:"metastore_id"` + // The name of the parent registered model of the model version, relative to + // parent schema + ModelName types.String `tfsdk:"model_name"` + // Model version dependencies, for feature-store packaged models + ModelVersionDependencies *DependencyList `tfsdk:"model_version_dependencies"` + // MLflow run ID used when creating the model version, if ``source`` was + // generated by an experiment run stored in an MLflow tracking server + RunId types.String `tfsdk:"run_id"` + // ID of the Databricks workspace containing the MLflow run that generated + // this model version, if applicable + RunWorkspaceId types.Int64 `tfsdk:"run_workspace_id"` + // The name of the schema containing the model version, relative to parent + // catalog + SchemaName types.String `tfsdk:"schema_name"` + // URI indicating the location of the source artifacts (files) for the model + // version + Source types.String `tfsdk:"source"` + // Current status of the model version. Newly created model versions start + // in PENDING_REGISTRATION status, then move to READY status once the model + // version files are uploaded and the model version is finalized. Only model + // versions in READY status can be loaded for inference or served. + Status ModelVersionInfoStatus `tfsdk:"status"` + // The storage location on the cloud under which model version data files + // are stored + StorageLocation types.String `tfsdk:"storage_location"` + + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // The identifier of the user who updated the model version last time + UpdatedBy types.String `tfsdk:"updated_by"` + // Integer model version number, used to reference the model version in API + // requests. + Version types.Int64 `tfsdk:"version"` +} + +// Current status of the model version. Newly created model versions start in +// PENDING_REGISTRATION status, then move to READY status once the model version +// files are uploaded and the model version is finalized. Only model versions in +// READY status can be loaded for inference or served. 
+type ModelVersionInfoStatus string + +const ModelVersionInfoStatusFailedRegistration ModelVersionInfoStatus = `FAILED_REGISTRATION` + +const ModelVersionInfoStatusPendingRegistration ModelVersionInfoStatus = `PENDING_REGISTRATION` + +const ModelVersionInfoStatusReady ModelVersionInfoStatus = `READY` + +// String representation for [fmt.Print] +func (f *ModelVersionInfoStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ModelVersionInfoStatus) Set(v string) error { + switch v { + case `FAILED_REGISTRATION`, `PENDING_REGISTRATION`, `READY`: + *f = ModelVersionInfoStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED_REGISTRATION", "PENDING_REGISTRATION", "READY"`, v) + } +} + +// Type always returns ModelVersionInfoStatus to satisfy [pflag.Value] interface +func (f *ModelVersionInfoStatus) Type() string { + return "ModelVersionInfoStatus" +} + +type MonitorCronSchedule struct { + // Read only field that indicates whether a schedule is paused or not. + PauseStatus MonitorCronSchedulePauseStatus `tfsdk:"pause_status"` + // The expression that determines when to run the monitor. See [examples]. + // + // [examples]: https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html + QuartzCronExpression types.String `tfsdk:"quartz_cron_expression"` + // The timezone id (e.g., ``"PST"``) in which to evaluate the quartz + // expression. + TimezoneId types.String `tfsdk:"timezone_id"` +} + +// Read only field that indicates whether a schedule is paused or not. +type MonitorCronSchedulePauseStatus string + +const MonitorCronSchedulePauseStatusPaused MonitorCronSchedulePauseStatus = `PAUSED` + +const MonitorCronSchedulePauseStatusUnpaused MonitorCronSchedulePauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (f *MonitorCronSchedulePauseStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorCronSchedulePauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *f = MonitorCronSchedulePauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns MonitorCronSchedulePauseStatus to satisfy [pflag.Value] interface +func (f *MonitorCronSchedulePauseStatus) Type() string { + return "MonitorCronSchedulePauseStatus" +} + +type MonitorDataClassificationConfig struct { + // Whether data classification is enabled. + Enabled types.Bool `tfsdk:"enabled"` +} + +type MonitorDestination struct { + // The list of email addresses to send the notification to. A maximum of 5 + // email addresses is supported. + EmailAddresses []types.String `tfsdk:"email_addresses"` +} + +type MonitorInferenceLog struct { + // Granularities for aggregating data into time windows based on their + // timestamp. Currently the following static granularities are supported: + // {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" + // week(s)"``, ``"1 month"``, ``"1 year"``}. + Granularities []types.String `tfsdk:"granularities"` + // Optional column that contains the ground truth for the prediction. + LabelCol types.String `tfsdk:"label_col"` + // Column that contains the id of the model generating the predictions. + // Metrics will be computed per model id by default, and also across all + // model ids. 
+ ModelIdCol types.String `tfsdk:"model_id_col"`
+ // Column that contains the output/prediction from the model.
+ PredictionCol types.String `tfsdk:"prediction_col"`
+ // Optional column that contains the prediction probabilities for each class
+ // in a classification problem type. The values in this column should be a
+ // map, mapping each class label to the prediction probability for a given
+ // sample. The map should be of PySpark MapType().
+ PredictionProbaCol types.String `tfsdk:"prediction_proba_col"`
+ // Problem type the model aims to solve. Determines the type of
+ // model-quality metrics that will be computed.
+ ProblemType MonitorInferenceLogProblemType `tfsdk:"problem_type"`
+ // Column that contains the timestamps of requests. The column must be one
+ // of the following: - A ``TimestampType`` column - A column whose values
+ // can be converted to timestamps through the pyspark ``to_timestamp``
+ // [function].
+ //
+ // [function]: https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html
+ TimestampCol types.String `tfsdk:"timestamp_col"`
+}
+
+// Problem type the model aims to solve. Determines the type of model-quality
+// metrics that will be computed.
+type MonitorInferenceLogProblemType string
+
+const MonitorInferenceLogProblemTypeProblemTypeClassification MonitorInferenceLogProblemType = `PROBLEM_TYPE_CLASSIFICATION`
+
+const MonitorInferenceLogProblemTypeProblemTypeRegression MonitorInferenceLogProblemType = `PROBLEM_TYPE_REGRESSION`
+
+// String representation for [fmt.Print]
+func (f *MonitorInferenceLogProblemType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *MonitorInferenceLogProblemType) Set(v string) error {
+ switch v {
+ case `PROBLEM_TYPE_CLASSIFICATION`, `PROBLEM_TYPE_REGRESSION`:
+ *f = MonitorInferenceLogProblemType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "PROBLEM_TYPE_CLASSIFICATION", "PROBLEM_TYPE_REGRESSION"`, v)
+ }
+}
+
+// Type always returns MonitorInferenceLogProblemType to satisfy [pflag.Value] interface
+func (f *MonitorInferenceLogProblemType) Type() string {
+ return "MonitorInferenceLogProblemType"
+}
+
+type MonitorInfo struct {
+ // The directory to store monitoring assets (e.g. dashboard, metric tables).
+ AssetsDir types.String `tfsdk:"assets_dir"`
+ // Name of the baseline table from which drift metrics are computed.
+ // Columns in the monitored table should also be present in the baseline
+ // table.
+ BaselineTableName types.String `tfsdk:"baseline_table_name"`
+ // Custom metrics to compute on the monitored table. These can be aggregate
+ // metrics, derived metrics (from already computed aggregate metrics), or
+ // drift metrics (comparing metrics across time windows).
+ CustomMetrics []MonitorMetric `tfsdk:"custom_metrics"`
+ // Id of dashboard that visualizes the computed metrics. This can be empty
+ // if the monitor is in PENDING state.
+ DashboardId types.String `tfsdk:"dashboard_id"`
+ // The data classification config for the monitor.
+ DataClassificationConfig *MonitorDataClassificationConfig `tfsdk:"data_classification_config"`
+ // The full name of the drift metrics table. Format:
+ // __catalog_name__.__schema_name__.__table_name__.
+ DriftMetricsTableName types.String `tfsdk:"drift_metrics_table_name"`
+ // Configuration for monitoring inference logs.
+ InferenceLog *MonitorInferenceLog `tfsdk:"inference_log"` + // The latest failure message of the monitor (if any). + LatestMonitorFailureMsg types.String `tfsdk:"latest_monitor_failure_msg"` + // The version of the monitor config (e.g. 1,2,3). If negative, the monitor + // may be corrupted. + MonitorVersion types.String `tfsdk:"monitor_version"` + // The notification settings for the monitor. + Notifications *MonitorNotifications `tfsdk:"notifications"` + // Schema where output metric tables are created. + OutputSchemaName types.String `tfsdk:"output_schema_name"` + // The full name of the profile metrics table. Format: + // __catalog_name__.__schema_name__.__table_name__. + ProfileMetricsTableName types.String `tfsdk:"profile_metrics_table_name"` + // The schedule for automatically updating and refreshing metric tables. + Schedule *MonitorCronSchedule `tfsdk:"schedule"` + // List of column expressions to slice data with for targeted analysis. The + // data is grouped by each expression independently, resulting in a separate + // slice for each predicate and its complements. For high-cardinality + // columns, only the top 100 unique values by frequency will generate + // slices. + SlicingExprs []types.String `tfsdk:"slicing_exprs"` + // Configuration for monitoring snapshot tables. + Snapshot *MonitorSnapshot `tfsdk:"snapshot"` + // The status of the monitor. + Status MonitorInfoStatus `tfsdk:"status"` + // The full name of the table to monitor. Format: + // __catalog_name__.__schema_name__.__table_name__. + TableName types.String `tfsdk:"table_name"` + // Configuration for monitoring time series tables. + TimeSeries *MonitorTimeSeries `tfsdk:"time_series"` +} + +// The status of the monitor. +type MonitorInfoStatus string + +const MonitorInfoStatusMonitorStatusActive MonitorInfoStatus = `MONITOR_STATUS_ACTIVE` + +const MonitorInfoStatusMonitorStatusDeletePending MonitorInfoStatus = `MONITOR_STATUS_DELETE_PENDING` + +const MonitorInfoStatusMonitorStatusError MonitorInfoStatus = `MONITOR_STATUS_ERROR` + +const MonitorInfoStatusMonitorStatusFailed MonitorInfoStatus = `MONITOR_STATUS_FAILED` + +const MonitorInfoStatusMonitorStatusPending MonitorInfoStatus = `MONITOR_STATUS_PENDING` + +// String representation for [fmt.Print] +func (f *MonitorInfoStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorInfoStatus) Set(v string) error { + switch v { + case `MONITOR_STATUS_ACTIVE`, `MONITOR_STATUS_DELETE_PENDING`, `MONITOR_STATUS_ERROR`, `MONITOR_STATUS_FAILED`, `MONITOR_STATUS_PENDING`: + *f = MonitorInfoStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MONITOR_STATUS_ACTIVE", "MONITOR_STATUS_DELETE_PENDING", "MONITOR_STATUS_ERROR", "MONITOR_STATUS_FAILED", "MONITOR_STATUS_PENDING"`, v) + } +} + +// Type always returns MonitorInfoStatus to satisfy [pflag.Value] interface +func (f *MonitorInfoStatus) Type() string { + return "MonitorInfoStatus" +} + +type MonitorMetric struct { + // Jinja template for a SQL expression that specifies how to compute the + // metric. See [create metric definition]. + // + // [create metric definition]: https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition + Definition types.String `tfsdk:"definition"` + // A list of column names in the input table the metric should be computed + // for. Can use ``":table"`` to indicate that the metric needs information + // from multiple columns. 
+ InputColumns []types.String `tfsdk:"input_columns"`
+ // Name of the metric in the output tables.
+ Name types.String `tfsdk:"name"`
+ // The output type of the custom metric.
+ OutputDataType types.String `tfsdk:"output_data_type"`
+ // Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``,
+ // ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. The
+ // ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"``
+ // metrics are computed on a single table, whereas the
+ // ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across baseline and input
+ // table, or across the two consecutive time windows. -
+ // CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your
+ // table - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed
+ // aggregate metrics - CUSTOM_METRIC_TYPE_DRIFT: depend on previously
+ // computed aggregate or derived metrics
+ Type MonitorMetricType `tfsdk:"type"`
+}
+
+// Can only be one of ``"CUSTOM_METRIC_TYPE_AGGREGATE"``,
+// ``"CUSTOM_METRIC_TYPE_DERIVED"``, or ``"CUSTOM_METRIC_TYPE_DRIFT"``. The
+// ``"CUSTOM_METRIC_TYPE_AGGREGATE"`` and ``"CUSTOM_METRIC_TYPE_DERIVED"``
+// metrics are computed on a single table, whereas the
+// ``"CUSTOM_METRIC_TYPE_DRIFT"`` compare metrics across baseline and input
+// table, or across the two consecutive time windows. -
+// CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your
+// table - CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate
+// metrics - CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate
+// or derived metrics
+type MonitorMetricType string
+
+const MonitorMetricTypeCustomMetricTypeAggregate MonitorMetricType = `CUSTOM_METRIC_TYPE_AGGREGATE`
+
+const MonitorMetricTypeCustomMetricTypeDerived MonitorMetricType = `CUSTOM_METRIC_TYPE_DERIVED`
+
+const MonitorMetricTypeCustomMetricTypeDrift MonitorMetricType = `CUSTOM_METRIC_TYPE_DRIFT`
+
+// String representation for [fmt.Print]
+func (f *MonitorMetricType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *MonitorMetricType) Set(v string) error {
+ switch v {
+ case `CUSTOM_METRIC_TYPE_AGGREGATE`, `CUSTOM_METRIC_TYPE_DERIVED`, `CUSTOM_METRIC_TYPE_DRIFT`:
+ *f = MonitorMetricType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "CUSTOM_METRIC_TYPE_AGGREGATE", "CUSTOM_METRIC_TYPE_DERIVED", "CUSTOM_METRIC_TYPE_DRIFT"`, v)
+ }
+}
+
+// Type always returns MonitorMetricType to satisfy [pflag.Value] interface
+func (f *MonitorMetricType) Type() string {
+ return "MonitorMetricType"
+}
+
+type MonitorNotifications struct {
+ // Who to send notifications to on monitor failure.
+ OnFailure *MonitorDestination `tfsdk:"on_failure"`
+ // Who to send notifications to when new data classification tags are
+ // detected.
+ OnNewClassificationTagDetected *MonitorDestination `tfsdk:"on_new_classification_tag_detected"`
+}
+
+type MonitorRefreshInfo struct {
+ // Time at which refresh operation completed (milliseconds since 1/1/1970
+ // UTC).
+ EndTimeMs types.Int64 `tfsdk:"end_time_ms"`
+ // An optional message to give insight into the current state of the job
+ // (e.g. FAILURE messages).
+ Message types.String `tfsdk:"message"`
+ // Unique id of the refresh operation.
+ RefreshId types.Int64 `tfsdk:"refresh_id"`
+ // Time at which refresh operation was initiated (milliseconds since
+ // 1/1/1970 UTC).
+ StartTimeMs types.Int64 `tfsdk:"start_time_ms"`
+ // The current state of the refresh.
+ State MonitorRefreshInfoState `tfsdk:"state"` + // The method by which the refresh was triggered. + Trigger MonitorRefreshInfoTrigger `tfsdk:"trigger"` +} + +// The current state of the refresh. +type MonitorRefreshInfoState string + +const MonitorRefreshInfoStateCanceled MonitorRefreshInfoState = `CANCELED` + +const MonitorRefreshInfoStateFailed MonitorRefreshInfoState = `FAILED` + +const MonitorRefreshInfoStatePending MonitorRefreshInfoState = `PENDING` + +const MonitorRefreshInfoStateRunning MonitorRefreshInfoState = `RUNNING` + +const MonitorRefreshInfoStateSuccess MonitorRefreshInfoState = `SUCCESS` + +// String representation for [fmt.Print] +func (f *MonitorRefreshInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorRefreshInfoState) Set(v string) error { + switch v { + case `CANCELED`, `FAILED`, `PENDING`, `RUNNING`, `SUCCESS`: + *f = MonitorRefreshInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "FAILED", "PENDING", "RUNNING", "SUCCESS"`, v) + } +} + +// Type always returns MonitorRefreshInfoState to satisfy [pflag.Value] interface +func (f *MonitorRefreshInfoState) Type() string { + return "MonitorRefreshInfoState" +} + +// The method by which the refresh was triggered. +type MonitorRefreshInfoTrigger string + +const MonitorRefreshInfoTriggerManual MonitorRefreshInfoTrigger = `MANUAL` + +const MonitorRefreshInfoTriggerSchedule MonitorRefreshInfoTrigger = `SCHEDULE` + +// String representation for [fmt.Print] +func (f *MonitorRefreshInfoTrigger) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MonitorRefreshInfoTrigger) Set(v string) error { + switch v { + case `MANUAL`, `SCHEDULE`: + *f = MonitorRefreshInfoTrigger(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MANUAL", "SCHEDULE"`, v) + } +} + +// Type always returns MonitorRefreshInfoTrigger to satisfy [pflag.Value] interface +func (f *MonitorRefreshInfoTrigger) Type() string { + return "MonitorRefreshInfoTrigger" +} + +type MonitorRefreshListResponse struct { + // List of refreshes. + Refreshes []MonitorRefreshInfo `tfsdk:"refreshes"` +} + +type MonitorSnapshot struct { +} + +type MonitorTimeSeries struct { + // Granularities for aggregating data into time windows based on their + // timestamp. Currently the following static granularities are supported: + // {``"5 minutes"``, ``"30 minutes"``, ``"1 hour"``, ``"1 day"``, ``" + // week(s)"``, ``"1 month"``, ``"1 year"``}. + Granularities []types.String `tfsdk:"granularities"` + // Column that contains the timestamps of requests. The column must be one + // of the following: - A ``TimestampType`` column - A column whose values + // can be converted to timestamps through the pyspark ``to_timestamp`` + // [function]. + // + // [function]: https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html + TimestampCol types.String `tfsdk:"timestamp_col"` +} + +type NamedTableConstraint struct { + // The name of the constraint. + Name types.String `tfsdk:"name"` +} + +// Online Table information. +type OnlineTable struct { + // Full three-part (catalog, schema, table) name of the table. + Name types.String `tfsdk:"name"` + // Specification of the online table. 
+ Spec *OnlineTableSpec `tfsdk:"spec"`
+ // Online Table status
+ Status *OnlineTableStatus `tfsdk:"status"`
+ // Data serving REST API URL for this table
+ TableServingUrl types.String `tfsdk:"table_serving_url"`
+}
+
+// Specification of an online table.
+type OnlineTableSpec struct {
+ // Whether to create a full-copy pipeline -- a pipeline that stops after
+ // creating a full copy of the source table upon initialization and does not
+ // process any change data feeds (CDFs) afterwards. The pipeline can still
+ // be manually triggered afterwards, but it always performs a full copy of
+ // the source table and there are no incremental updates. This mode is
+ // useful for syncing views or tables without CDFs to online tables. Note
+ // that the full-copy pipeline only supports the "triggered" scheduling policy.
+ PerformFullCopy types.Bool `tfsdk:"perform_full_copy"`
+ // ID of the associated pipeline. Generated by the server - cannot be set by
+ // the caller.
+ PipelineId types.String `tfsdk:"pipeline_id"`
+ // Primary Key columns to be used for data insert/update in the destination.
+ PrimaryKeyColumns []types.String `tfsdk:"primary_key_columns"`
+ // Pipeline runs continuously after generating the initial data.
+ RunContinuously *OnlineTableSpecContinuousSchedulingPolicy `tfsdk:"run_continuously"`
+ // Pipeline stops after generating the initial data and can be triggered
+ // later (manually, through a cron job or through data triggers).
+ RunTriggered *OnlineTableSpecTriggeredSchedulingPolicy `tfsdk:"run_triggered"`
+ // Three-part (catalog, schema, table) name of the source Delta table.
+ SourceTableFullName types.String `tfsdk:"source_table_full_name"`
+ // Time series key to deduplicate (tie-break) rows with the same primary
+ // key.
+ TimeseriesKey types.String `tfsdk:"timeseries_key"`
+}
+
+type OnlineTableSpecContinuousSchedulingPolicy struct {
+}
+
+type OnlineTableSpecTriggeredSchedulingPolicy struct {
+}
+
+// The state of an online table.
+type OnlineTableState string + +const OnlineTableStateOffline OnlineTableState = `OFFLINE` + +const OnlineTableStateOfflineFailed OnlineTableState = `OFFLINE_FAILED` + +const OnlineTableStateOnline OnlineTableState = `ONLINE` + +const OnlineTableStateOnlineContinuousUpdate OnlineTableState = `ONLINE_CONTINUOUS_UPDATE` + +const OnlineTableStateOnlineNoPendingUpdate OnlineTableState = `ONLINE_NO_PENDING_UPDATE` + +const OnlineTableStateOnlinePipelineFailed OnlineTableState = `ONLINE_PIPELINE_FAILED` + +const OnlineTableStateOnlineTableStateUnspecified OnlineTableState = `ONLINE_TABLE_STATE_UNSPECIFIED` + +const OnlineTableStateOnlineTriggeredUpdate OnlineTableState = `ONLINE_TRIGGERED_UPDATE` + +const OnlineTableStateOnlineUpdatingPipelineResources OnlineTableState = `ONLINE_UPDATING_PIPELINE_RESOURCES` + +const OnlineTableStateProvisioning OnlineTableState = `PROVISIONING` + +const OnlineTableStateProvisioningInitialSnapshot OnlineTableState = `PROVISIONING_INITIAL_SNAPSHOT` + +const OnlineTableStateProvisioningPipelineResources OnlineTableState = `PROVISIONING_PIPELINE_RESOURCES` + +// String representation for [fmt.Print] +func (f *OnlineTableState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *OnlineTableState) Set(v string) error { + switch v { + case `OFFLINE`, `OFFLINE_FAILED`, `ONLINE`, `ONLINE_CONTINUOUS_UPDATE`, `ONLINE_NO_PENDING_UPDATE`, `ONLINE_PIPELINE_FAILED`, `ONLINE_TABLE_STATE_UNSPECIFIED`, `ONLINE_TRIGGERED_UPDATE`, `ONLINE_UPDATING_PIPELINE_RESOURCES`, `PROVISIONING`, `PROVISIONING_INITIAL_SNAPSHOT`, `PROVISIONING_PIPELINE_RESOURCES`: + *f = OnlineTableState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OFFLINE", "OFFLINE_FAILED", "ONLINE", "ONLINE_CONTINUOUS_UPDATE", "ONLINE_NO_PENDING_UPDATE", "ONLINE_PIPELINE_FAILED", "ONLINE_TABLE_STATE_UNSPECIFIED", "ONLINE_TRIGGERED_UPDATE", "ONLINE_UPDATING_PIPELINE_RESOURCES", "PROVISIONING", "PROVISIONING_INITIAL_SNAPSHOT", "PROVISIONING_PIPELINE_RESOURCES"`, v) + } +} + +// Type always returns OnlineTableState to satisfy [pflag.Value] interface +func (f *OnlineTableState) Type() string { + return "OnlineTableState" +} + +// Status of an online table. +type OnlineTableStatus struct { + // Detailed status of an online table. Shown if the online table is in the + // ONLINE_CONTINUOUS_UPDATE or the ONLINE_UPDATING_PIPELINE_RESOURCES state. + ContinuousUpdateStatus *ContinuousUpdateStatus `tfsdk:"continuous_update_status"` + // The state of the online table. + DetailedState OnlineTableState `tfsdk:"detailed_state"` + // Detailed status of an online table. Shown if the online table is in the + // OFFLINE_FAILED or the ONLINE_PIPELINE_FAILED state. + FailedStatus *FailedStatus `tfsdk:"failed_status"` + // A text description of the current state of the online table. + Message types.String `tfsdk:"message"` + // Detailed status of an online table. Shown if the online table is in the + // PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT + // state. + ProvisioningStatus *ProvisioningStatus `tfsdk:"provisioning_status"` + // Detailed status of an online table. Shown if the online table is in the + // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. + TriggeredUpdateStatus *TriggeredUpdateStatus `tfsdk:"triggered_update_status"` +} + +type PermissionsChange struct { + // The set of privileges to add. + Add []Privilege `tfsdk:"add"` + // The principal whose privileges we are changing. 
+ Principal types.String `tfsdk:"principal"` + // The set of privileges to remove. + Remove []Privilege `tfsdk:"remove"` +} + +type PermissionsList struct { + // The privileges assigned to each principal + PrivilegeAssignments []PrivilegeAssignment `tfsdk:"privilege_assignments"` +} + +// Progress information of the Online Table data synchronization pipeline. +type PipelineProgress struct { + // The estimated time remaining to complete this update in seconds. + EstimatedCompletionTimeSeconds types.Float64 `tfsdk:"estimated_completion_time_seconds"` + // The source table Delta version that was last processed by the pipeline. + // The pipeline may not have completely processed this version yet. + LatestVersionCurrentlyProcessing types.Int64 `tfsdk:"latest_version_currently_processing"` + // The completion ratio of this update. This is a number between 0 and 1. + SyncProgressCompletion types.Float64 `tfsdk:"sync_progress_completion"` + // The number of rows that have been synced in this update. + SyncedRowCount types.Int64 `tfsdk:"synced_row_count"` + // The total number of rows that need to be synced in this update. This + // number may be an estimate. + TotalRowCount types.Int64 `tfsdk:"total_row_count"` +} + +type PrimaryKeyConstraint struct { + // Column names for this constraint. + ChildColumns []types.String `tfsdk:"child_columns"` + // The name of the constraint. + Name types.String `tfsdk:"name"` +} + +type Privilege string + +const PrivilegeAccess Privilege = `ACCESS` + +const PrivilegeAllPrivileges Privilege = `ALL_PRIVILEGES` + +const PrivilegeApplyTag Privilege = `APPLY_TAG` + +const PrivilegeCreate Privilege = `CREATE` + +const PrivilegeCreateCatalog Privilege = `CREATE_CATALOG` + +const PrivilegeCreateConnection Privilege = `CREATE_CONNECTION` + +const PrivilegeCreateExternalLocation Privilege = `CREATE_EXTERNAL_LOCATION` + +const PrivilegeCreateExternalTable Privilege = `CREATE_EXTERNAL_TABLE` + +const PrivilegeCreateExternalVolume Privilege = `CREATE_EXTERNAL_VOLUME` + +const PrivilegeCreateForeignCatalog Privilege = `CREATE_FOREIGN_CATALOG` + +const PrivilegeCreateFunction Privilege = `CREATE_FUNCTION` + +const PrivilegeCreateManagedStorage Privilege = `CREATE_MANAGED_STORAGE` + +const PrivilegeCreateMaterializedView Privilege = `CREATE_MATERIALIZED_VIEW` + +const PrivilegeCreateModel Privilege = `CREATE_MODEL` + +const PrivilegeCreateProvider Privilege = `CREATE_PROVIDER` + +const PrivilegeCreateRecipient Privilege = `CREATE_RECIPIENT` + +const PrivilegeCreateSchema Privilege = `CREATE_SCHEMA` + +const PrivilegeCreateServiceCredential Privilege = `CREATE_SERVICE_CREDENTIAL` + +const PrivilegeCreateShare Privilege = `CREATE_SHARE` + +const PrivilegeCreateStorageCredential Privilege = `CREATE_STORAGE_CREDENTIAL` + +const PrivilegeCreateTable Privilege = `CREATE_TABLE` + +const PrivilegeCreateView Privilege = `CREATE_VIEW` + +const PrivilegeCreateVolume Privilege = `CREATE_VOLUME` + +const PrivilegeExecute Privilege = `EXECUTE` + +const PrivilegeManageAllowlist Privilege = `MANAGE_ALLOWLIST` + +const PrivilegeModify Privilege = `MODIFY` + +const PrivilegeReadFiles Privilege = `READ_FILES` + +const PrivilegeReadPrivateFiles Privilege = `READ_PRIVATE_FILES` + +const PrivilegeReadVolume Privilege = `READ_VOLUME` + +const PrivilegeRefresh Privilege = `REFRESH` + +const PrivilegeSelect Privilege = `SELECT` + +const PrivilegeSetSharePermission Privilege = `SET_SHARE_PERMISSION` + +const PrivilegeUsage Privilege = `USAGE` + +const PrivilegeUseCatalog Privilege = `USE_CATALOG` + +const 
PrivilegeUseConnection Privilege = `USE_CONNECTION` + +const PrivilegeUseMarketplaceAssets Privilege = `USE_MARKETPLACE_ASSETS` + +const PrivilegeUseProvider Privilege = `USE_PROVIDER` + +const PrivilegeUseRecipient Privilege = `USE_RECIPIENT` + +const PrivilegeUseSchema Privilege = `USE_SCHEMA` + +const PrivilegeUseShare Privilege = `USE_SHARE` + +const PrivilegeWriteFiles Privilege = `WRITE_FILES` + +const PrivilegeWritePrivateFiles Privilege = `WRITE_PRIVATE_FILES` + +const PrivilegeWriteVolume Privilege = `WRITE_VOLUME` + +// String representation for [fmt.Print] +func (f *Privilege) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Privilege) Set(v string) error { + switch v { + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + *f = Privilege(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + } +} + +// Type always returns Privilege to satisfy [pflag.Value] interface +func (f *Privilege) Type() string { + return "Privilege" +} + +type PrivilegeAssignment struct { + // The principal (user email address or group name). + Principal types.String `tfsdk:"principal"` + // The privileges assigned to the principal. + Privileges []Privilege `tfsdk:"privileges"` +} + +// An object containing map of key-value properties attached to the connection. +type PropertiesKvPairs map[string]types.String + +// Status of an asynchronously provisioned resource. 
+type ProvisioningInfo struct { + State ProvisioningInfoState `tfsdk:"state"` +} + +type ProvisioningInfoState string + +const ProvisioningInfoStateActive ProvisioningInfoState = `ACTIVE` + +const ProvisioningInfoStateDeleting ProvisioningInfoState = `DELETING` + +const ProvisioningInfoStateFailed ProvisioningInfoState = `FAILED` + +const ProvisioningInfoStateProvisioning ProvisioningInfoState = `PROVISIONING` + +const ProvisioningInfoStateStateUnspecified ProvisioningInfoState = `STATE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *ProvisioningInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ProvisioningInfoState) Set(v string) error { + switch v { + case `ACTIVE`, `DELETING`, `FAILED`, `PROVISIONING`, `STATE_UNSPECIFIED`: + *f = ProvisioningInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DELETING", "FAILED", "PROVISIONING", "STATE_UNSPECIFIED"`, v) + } +} + +// Type always returns ProvisioningInfoState to satisfy [pflag.Value] interface +func (f *ProvisioningInfoState) Type() string { + return "ProvisioningInfoState" +} + +// Detailed status of an online table. Shown if the online table is in the +// PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state. +type ProvisioningStatus struct { + // Details about initial data synchronization. Only populated when in the + // PROVISIONING_INITIAL_SNAPSHOT state. + InitialPipelineSyncProgress *PipelineProgress `tfsdk:"initial_pipeline_sync_progress"` +} + +// Get a Volume +type ReadVolumeRequest struct { + // Whether to include volumes in the response for which the principal can + // only access selective metadata for + IncludeBrowse types.Bool `tfsdk:"-" url:"include_browse,omitempty"` + // The three-level (fully qualified) name of the volume + Name types.String `tfsdk:"-" url:"-"` +} + +// Registered model alias. +type RegisteredModelAlias struct { + // Name of the alias, e.g. 'champion' or 'latest_stable' + AliasName types.String `tfsdk:"alias_name"` + // Integer version number of the model version to which this alias points. + VersionNum types.Int64 `tfsdk:"version_num"` +} + +type RegisteredModelInfo struct { + // List of aliases associated with the registered model + Aliases []RegisteredModelAlias `tfsdk:"aliases"` + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. 
+ BrowseOnly types.Bool `tfsdk:"browse_only"`
+ // The name of the catalog where the schema and the registered model reside
+ CatalogName types.String `tfsdk:"catalog_name"`
+ // The comment attached to the registered model
+ Comment types.String `tfsdk:"comment"`
+ // Creation timestamp of the registered model in milliseconds since the Unix
+ // epoch
+ CreatedAt types.Int64 `tfsdk:"created_at"`
+ // The identifier of the user who created the registered model
+ CreatedBy types.String `tfsdk:"created_by"`
+ // The three-level (fully qualified) name of the registered model
+ FullName types.String `tfsdk:"full_name"`
+ // The unique identifier of the metastore
+ MetastoreId types.String `tfsdk:"metastore_id"`
+ // The name of the registered model
+ Name types.String `tfsdk:"name"`
+ // The identifier of the user who owns the registered model
+ Owner types.String `tfsdk:"owner"`
+ // The name of the schema where the registered model resides
+ SchemaName types.String `tfsdk:"schema_name"`
+ // The storage location on the cloud under which model version data files
+ // are stored
+ StorageLocation types.String `tfsdk:"storage_location"`
+ // Last-update timestamp of the registered model in milliseconds since the
+ // Unix epoch
+ UpdatedAt types.Int64 `tfsdk:"updated_at"`
+ // The identifier of the user who updated the registered model last time
+ UpdatedBy types.String `tfsdk:"updated_by"`
+}
+
+// Queue a metric refresh for a monitor
+type RunRefreshRequest struct {
+ // Full name of the table.
+ TableName types.String `tfsdk:"-" url:"-"`
+}
+
+type SchemaInfo struct {
+ // Indicates whether the principal is limited to retrieving metadata for the
+ // associated object through the BROWSE privilege when include_browse is
+ // enabled in the request.
+ BrowseOnly types.Bool `tfsdk:"browse_only"`
+ // Name of parent catalog.
+ CatalogName types.String `tfsdk:"catalog_name"`
+ // The type of the parent catalog.
+ CatalogType types.String `tfsdk:"catalog_type"`
+ // User-provided free-form text description.
+ Comment types.String `tfsdk:"comment"`
+ // Time at which this schema was created, in epoch milliseconds.
+ CreatedAt types.Int64 `tfsdk:"created_at"`
+ // Username of schema creator.
+ CreatedBy types.String `tfsdk:"created_by"`
+
+ EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag"`
+ // Whether predictive optimization should be enabled for this object and
+ // objects under it.
+ EnablePredictiveOptimization EnablePredictiveOptimization `tfsdk:"enable_predictive_optimization"`
+ // Full name of schema, in form of __catalog_name__.__schema_name__.
+ FullName types.String `tfsdk:"full_name"`
+ // Unique identifier of parent metastore.
+ MetastoreId types.String `tfsdk:"metastore_id"`
+ // Name of schema, relative to parent catalog.
+ Name types.String `tfsdk:"name"`
+ // Username of current owner of schema.
+ Owner types.String `tfsdk:"owner"`
+ // A map of key-value properties attached to the securable.
+ Properties map[string]types.String `tfsdk:"properties"`
+ // The unique identifier of the schema.
+ SchemaId types.String `tfsdk:"schema_id"`
+ // Storage location for managed tables within schema.
+ StorageLocation types.String `tfsdk:"storage_location"`
+ // Storage root URL for managed tables within schema.
+ StorageRoot types.String `tfsdk:"storage_root"`
+ // Time at which this schema was last modified, in epoch milliseconds.
+ UpdatedAt types.Int64 `tfsdk:"updated_at"`
+ // Username of user who last modified schema.
+ UpdatedBy types.String `tfsdk:"updated_by"` +} + +// A map of key-value properties attached to the securable. +type SecurableOptionsMap map[string]types.String + +// A map of key-value properties attached to the securable. +type SecurablePropertiesMap map[string]types.String + +// The type of Unity Catalog securable +type SecurableType string + +const SecurableTypeCatalog SecurableType = `catalog` + +const SecurableTypeConnection SecurableType = `connection` + +const SecurableTypeExternalLocation SecurableType = `external_location` + +const SecurableTypeFunction SecurableType = `function` + +const SecurableTypeMetastore SecurableType = `metastore` + +const SecurableTypePipeline SecurableType = `pipeline` + +const SecurableTypeProvider SecurableType = `provider` + +const SecurableTypeRecipient SecurableType = `recipient` + +const SecurableTypeSchema SecurableType = `schema` + +const SecurableTypeShare SecurableType = `share` + +const SecurableTypeStorageCredential SecurableType = `storage_credential` + +const SecurableTypeTable SecurableType = `table` + +const SecurableTypeVolume SecurableType = `volume` + +// String representation for [fmt.Print] +func (f *SecurableType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SecurableType) Set(v string) error { + switch v { + case `catalog`, `connection`, `external_location`, `function`, `metastore`, `pipeline`, `provider`, `recipient`, `schema`, `share`, `storage_credential`, `table`, `volume`: + *f = SecurableType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "catalog", "connection", "external_location", "function", "metastore", "pipeline", "provider", "recipient", "schema", "share", "storage_credential", "table", "volume"`, v) + } +} + +// Type always returns SecurableType to satisfy [pflag.Value] interface +func (f *SecurableType) Type() string { + return "SecurableType" +} + +type SetArtifactAllowlist struct { + // A list of allowed artifact match patterns. + ArtifactMatchers []ArtifactMatcher `tfsdk:"artifact_matchers"` + // The artifact type of the allowlist. + ArtifactType ArtifactType `tfsdk:"-" url:"-"` +} + +type SetRegisteredModelAliasRequest struct { + // The name of the alias + Alias types.String `tfsdk:"alias" url:"-"` + // Full name of the registered model + FullName types.String `tfsdk:"full_name" url:"-"` + // The version number of the model version to which the alias points + VersionNum types.Int64 `tfsdk:"version_num"` +} + +// Server-Side Encryption properties for clients communicating with AWS s3. +type SseEncryptionDetails struct { + // The type of key encryption to use (affects headers from s3 client). + Algorithm SseEncryptionDetailsAlgorithm `tfsdk:"algorithm"` + // When algorithm is **AWS_SSE_KMS** this field specifies the ARN of the SSE + // key to use. + AwsKmsKeyArn types.String `tfsdk:"aws_kms_key_arn"` +} + +// The type of key encryption to use (affects headers from s3 client). 
+type SseEncryptionDetailsAlgorithm string + +const SseEncryptionDetailsAlgorithmAwsSseKms SseEncryptionDetailsAlgorithm = `AWS_SSE_KMS` + +const SseEncryptionDetailsAlgorithmAwsSseS3 SseEncryptionDetailsAlgorithm = `AWS_SSE_S3` + +// String representation for [fmt.Print] +func (f *SseEncryptionDetailsAlgorithm) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SseEncryptionDetailsAlgorithm) Set(v string) error { + switch v { + case `AWS_SSE_KMS`, `AWS_SSE_S3`: + *f = SseEncryptionDetailsAlgorithm(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AWS_SSE_KMS", "AWS_SSE_S3"`, v) + } +} + +// Type always returns SseEncryptionDetailsAlgorithm to satisfy [pflag.Value] interface +func (f *SseEncryptionDetailsAlgorithm) Type() string { + return "SseEncryptionDetailsAlgorithm" +} + +type StorageCredentialInfo struct { + // The AWS IAM role configuration. + AwsIamRole *AwsIamRoleResponse `tfsdk:"aws_iam_role"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentityResponse `tfsdk:"azure_managed_identity"` + // The Azure service principal configuration. + AzureServicePrincipal *AzureServicePrincipal `tfsdk:"azure_service_principal"` + // The Cloudflare API token configuration. + CloudflareApiToken *CloudflareApiToken `tfsdk:"cloudflare_api_token"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment"` + // Time at which this Credential was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of credential creator. + CreatedBy types.String `tfsdk:"created_by"` + // The Databricks managed GCP service account configuration. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccountResponse `tfsdk:"databricks_gcp_service_account"` + // The unique identifier of the credential. + Id types.String `tfsdk:"id"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode IsolationMode `tfsdk:"isolation_mode"` + // Unique identifier of parent metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // The credential name. The name must be unique within the metastore. + Name types.String `tfsdk:"name"` + // Username of current owner of credential. + Owner types.String `tfsdk:"owner"` + // Whether the storage credential is only usable for read operations. + ReadOnly types.Bool `tfsdk:"read_only"` + // Time at which this credential was last modified, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of user who last modified the credential. + UpdatedBy types.String `tfsdk:"updated_by"` + // Whether this credential is the current metastore's root storage + // credential. + UsedForManagedStorage types.Bool `tfsdk:"used_for_managed_storage"` +} + +type SystemSchemaInfo struct { + // Name of the system schema. + Schema types.String `tfsdk:"schema"` + // The current state of enablement for the system schema. An empty string + // means the system schema is available and ready for opt-in. + State SystemSchemaInfoState `tfsdk:"state"` +} + +// The current state of enablement for the system schema. An empty string means +// the system schema is available and ready for opt-in. 
+type SystemSchemaInfoState string + +const SystemSchemaInfoStateAvailable SystemSchemaInfoState = `AVAILABLE` + +const SystemSchemaInfoStateDisableInitialized SystemSchemaInfoState = `DISABLE_INITIALIZED` + +const SystemSchemaInfoStateEnableCompleted SystemSchemaInfoState = `ENABLE_COMPLETED` + +const SystemSchemaInfoStateEnableInitialized SystemSchemaInfoState = `ENABLE_INITIALIZED` + +const SystemSchemaInfoStateUnavailable SystemSchemaInfoState = `UNAVAILABLE` + +// String representation for [fmt.Print] +func (f *SystemSchemaInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SystemSchemaInfoState) Set(v string) error { + switch v { + case `AVAILABLE`, `DISABLE_INITIALIZED`, `ENABLE_COMPLETED`, `ENABLE_INITIALIZED`, `UNAVAILABLE`: + *f = SystemSchemaInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AVAILABLE", "DISABLE_INITIALIZED", "ENABLE_COMPLETED", "ENABLE_INITIALIZED", "UNAVAILABLE"`, v) + } +} + +// Type always returns SystemSchemaInfoState to satisfy [pflag.Value] interface +func (f *SystemSchemaInfoState) Type() string { + return "SystemSchemaInfoState" +} + +// A table constraint, as defined by *one* of the following fields being set: +// __primary_key_constraint__, __foreign_key_constraint__, +// __named_table_constraint__. +type TableConstraint struct { + ForeignKeyConstraint *ForeignKeyConstraint `tfsdk:"foreign_key_constraint"` + + NamedTableConstraint *NamedTableConstraint `tfsdk:"named_table_constraint"` + + PrimaryKeyConstraint *PrimaryKeyConstraint `tfsdk:"primary_key_constraint"` +} + +// A table that is dependent on a SQL object. +type TableDependency struct { + // Full name of the dependent table, in the form of + // __catalog_name__.__schema_name__.__table_name__. + TableFullName types.String `tfsdk:"table_full_name"` +} + +type TableExistsResponse struct { + // Whether the table exists or not. + TableExists types.Bool `tfsdk:"table_exists"` +} + +type TableInfo struct { + // The AWS access point to use when accesing s3 for this external location. + AccessPoint types.String `tfsdk:"access_point"` + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. + BrowseOnly types.Bool `tfsdk:"browse_only"` + // Name of parent catalog. + CatalogName types.String `tfsdk:"catalog_name"` + // The array of __ColumnInfo__ definitions of the table's columns. + Columns []ColumnInfo `tfsdk:"columns"` + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Time at which this table was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of table creator. + CreatedBy types.String `tfsdk:"created_by"` + // Unique ID of the Data Access Configuration to use with the table data. + DataAccessConfigurationId types.String `tfsdk:"data_access_configuration_id"` + // Data source format + DataSourceFormat DataSourceFormat `tfsdk:"data_source_format"` + // Time at which this table was deleted, in epoch milliseconds. Field is + // omitted if table is not deleted. + DeletedAt types.Int64 `tfsdk:"deleted_at"` + // Information pertaining to current state of the delta table. 
+ DeltaRuntimePropertiesKvpairs *DeltaRuntimePropertiesKvPairs `tfsdk:"delta_runtime_properties_kvpairs"` + + EffectivePredictiveOptimizationFlag *EffectivePredictiveOptimizationFlag `tfsdk:"effective_predictive_optimization_flag"` + // Whether predictive optimization should be enabled for this object and + // objects under it. + EnablePredictiveOptimization EnablePredictiveOptimization `tfsdk:"enable_predictive_optimization"` + // Encryption options that apply to clients connecting to cloud storage. + EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details"` + // Full name of table, in form of + // __catalog_name__.__schema_name__.__table_name__ + FullName types.String `tfsdk:"full_name"` + // Unique identifier of parent metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // Name of table, relative to parent schema. + Name types.String `tfsdk:"name"` + // Username of current owner of table. + Owner types.String `tfsdk:"owner"` + // The pipeline ID of the table. Applicable for tables created by pipelines + // (Materialized View, Streaming Table, etc.). + PipelineId types.String `tfsdk:"pipeline_id"` + // A map of key-value properties attached to the securable. + Properties map[string]types.String `tfsdk:"properties"` + + RowFilter *TableRowFilter `tfsdk:"row_filter"` + // Name of parent schema relative to its parent catalog. + SchemaName types.String `tfsdk:"schema_name"` + // List of schemes whose objects can be referenced without qualification. + SqlPath types.String `tfsdk:"sql_path"` + // Name of the storage credential, when a storage credential is configured + // for use with this table. + StorageCredentialName types.String `tfsdk:"storage_credential_name"` + // Storage root URL for table (for **MANAGED**, **EXTERNAL** tables) + StorageLocation types.String `tfsdk:"storage_location"` + // List of table constraints. Note: this field is not set in the output of + // the __listTables__ API. + TableConstraints []TableConstraint `tfsdk:"table_constraints"` + // The unique identifier of the table. + TableId types.String `tfsdk:"table_id"` + + TableType TableType `tfsdk:"table_type"` + // Time at which this table was last modified, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of user who last modified the table. + UpdatedBy types.String `tfsdk:"updated_by"` + // View definition SQL (when __table_type__ is **VIEW**, + // **MATERIALIZED_VIEW**, or **STREAMING_TABLE**) + ViewDefinition types.String `tfsdk:"view_definition"` + // View dependencies (when table_type == **VIEW** or **MATERIALIZED_VIEW**, + // **STREAMING_TABLE**) - when DependencyList is None, the dependency is not + // provided; - when DependencyList is an empty list, the dependency is + // provided but is empty; - when DependencyList is not an empty list, + // dependencies are provided and recorded. + ViewDependencies *DependencyList `tfsdk:"view_dependencies"` +} + +type TableRowFilter struct { + // The full name of the row filter SQL UDF. + FunctionName types.String `tfsdk:"function_name"` + // The list of table columns to be passed as input to the row filter + // function. The column types should match the types of the filter function + // arguments. + InputColumnNames []types.String `tfsdk:"input_column_names"` +} + +type TableSummary struct { + // The full name of the table. 
+ FullName types.String `tfsdk:"full_name"` + + TableType TableType `tfsdk:"table_type"` +} + +type TableType string + +const TableTypeExternal TableType = `EXTERNAL` + +const TableTypeExternalShallowClone TableType = `EXTERNAL_SHALLOW_CLONE` + +const TableTypeForeign TableType = `FOREIGN` + +const TableTypeManaged TableType = `MANAGED` + +const TableTypeManagedShallowClone TableType = `MANAGED_SHALLOW_CLONE` + +const TableTypeMaterializedView TableType = `MATERIALIZED_VIEW` + +const TableTypeStreamingTable TableType = `STREAMING_TABLE` + +const TableTypeView TableType = `VIEW` + +// String representation for [fmt.Print] +func (f *TableType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TableType) Set(v string) error { + switch v { + case `EXTERNAL`, `EXTERNAL_SHALLOW_CLONE`, `FOREIGN`, `MANAGED`, `MANAGED_SHALLOW_CLONE`, `MATERIALIZED_VIEW`, `STREAMING_TABLE`, `VIEW`: + *f = TableType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EXTERNAL", "EXTERNAL_SHALLOW_CLONE", "FOREIGN", "MANAGED", "MANAGED_SHALLOW_CLONE", "MATERIALIZED_VIEW", "STREAMING_TABLE", "VIEW"`, v) + } +} + +// Type always returns TableType to satisfy [pflag.Value] interface +func (f *TableType) Type() string { + return "TableType" +} + +// Detailed status of an online table. Shown if the online table is in the +// ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. +type TriggeredUpdateStatus struct { + // The last source table Delta version that was synced to the online table. + // Note that this Delta version may not be completely synced to the online + // table yet. + LastProcessedCommitVersion types.Int64 `tfsdk:"last_processed_commit_version"` + // The timestamp of the last time any data was synchronized from the source + // table to the online table. + Timestamp types.String `tfsdk:"timestamp"` + // Progress of the active data synchronization pipeline. + TriggeredUpdateProgress *PipelineProgress `tfsdk:"triggered_update_progress"` +} + +// Delete an assignment +type UnassignRequest struct { + // Query for the ID of the metastore to delete. + MetastoreId types.String `tfsdk:"-" url:"metastore_id"` + // A workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type UnassignResponse struct { +} + +type UpdateAssignmentResponse struct { +} + +type UpdateCatalog struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Whether predictive optimization should be enabled for this object and + // objects under it. + EnablePredictiveOptimization EnablePredictiveOptimization `tfsdk:"enable_predictive_optimization"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode CatalogIsolationMode `tfsdk:"isolation_mode"` + // The name of the catalog. + Name types.String `tfsdk:"-" url:"-"` + // New name for the catalog. + NewName types.String `tfsdk:"new_name"` + // Username of current owner of catalog. + Owner types.String `tfsdk:"owner"` + // A map of key-value properties attached to the securable. + Properties map[string]types.String `tfsdk:"properties"` +} + +type UpdateConnection struct { + // Name of the connection. + Name types.String `tfsdk:"-" url:"-"` + // New name for the connection. + NewName types.String `tfsdk:"new_name"` + // A map of key-value properties attached to the securable. + Options map[string]types.String `tfsdk:"options"` + // Username of current owner of the connection. 
+ Owner types.String `tfsdk:"owner"` +} + +type UpdateExternalLocation struct { + // The AWS access point to use when accesing s3 for this external location. + AccessPoint types.String `tfsdk:"access_point"` + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Name of the storage credential used with this location. + CredentialName types.String `tfsdk:"credential_name"` + // Encryption options that apply to clients connecting to cloud storage. + EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details"` + // Force update even if changing url invalidates dependent external tables + // or mounts. + Force types.Bool `tfsdk:"force"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode IsolationMode `tfsdk:"isolation_mode"` + // Name of the external location. + Name types.String `tfsdk:"-" url:"-"` + // New name for the external location. + NewName types.String `tfsdk:"new_name"` + // The owner of the external location. + Owner types.String `tfsdk:"owner"` + // Indicates whether the external location is read-only. + ReadOnly types.Bool `tfsdk:"read_only"` + // Skips validation of the storage credential associated with the external + // location. + SkipValidation types.Bool `tfsdk:"skip_validation"` + // Path URL of the external location. + Url types.String `tfsdk:"url"` +} + +type UpdateFunction struct { + // The fully-qualified name of the function (of the form + // __catalog_name__.__schema_name__.__function__name__). + Name types.String `tfsdk:"-" url:"-"` + // Username of current owner of function. + Owner types.String `tfsdk:"owner"` +} + +type UpdateMetastore struct { + // The organization name of a Delta Sharing entity, to be used in + // Databricks-to-Databricks Delta Sharing as the official name. + DeltaSharingOrganizationName types.String `tfsdk:"delta_sharing_organization_name"` + // The lifetime of delta sharing recipient token in seconds. + DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds"` + // The scope of Delta Sharing enabled for the metastore. + DeltaSharingScope UpdateMetastoreDeltaSharingScope `tfsdk:"delta_sharing_scope"` + // Unique ID of the metastore. + Id types.String `tfsdk:"-" url:"-"` + // New name for the metastore. + NewName types.String `tfsdk:"new_name"` + // The owner of the metastore. + Owner types.String `tfsdk:"owner"` + // Privilege model version of the metastore, of the form `major.minor` + // (e.g., `1.0`). + PrivilegeModelVersion types.String `tfsdk:"privilege_model_version"` + // UUID of storage credential to access the metastore storage_root. + StorageRootCredentialId types.String `tfsdk:"storage_root_credential_id"` +} + +type UpdateMetastoreAssignment struct { + // The name of the default catalog for the metastore. + DefaultCatalogName types.String `tfsdk:"default_catalog_name"` + // The unique ID of the metastore. + MetastoreId types.String `tfsdk:"metastore_id"` + // A workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +// The scope of Delta Sharing enabled for the metastore. 
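+// Like the other enum string types generated in this package, it implements
+// String, Set and Type and therefore satisfies the [pflag.Value] interface;
+// Set rejects values outside the allowed set. A purely illustrative sketch
+// (hypothetical usage):
+//
+//	var scope UpdateMetastoreDeltaSharingScope
+//	_ = scope.Set(`INTERNAL`)          // ok: scope is now `INTERNAL`
+//	err := scope.Set(`EXTERNAL_ONLY`)  // error: value is not one of the allowed values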
+type UpdateMetastoreDeltaSharingScope string + +const UpdateMetastoreDeltaSharingScopeInternal UpdateMetastoreDeltaSharingScope = `INTERNAL` + +const UpdateMetastoreDeltaSharingScopeInternalAndExternal UpdateMetastoreDeltaSharingScope = `INTERNAL_AND_EXTERNAL` + +// String representation for [fmt.Print] +func (f *UpdateMetastoreDeltaSharingScope) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateMetastoreDeltaSharingScope) Set(v string) error { + switch v { + case `INTERNAL`, `INTERNAL_AND_EXTERNAL`: + *f = UpdateMetastoreDeltaSharingScope(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INTERNAL", "INTERNAL_AND_EXTERNAL"`, v) + } +} + +// Type always returns UpdateMetastoreDeltaSharingScope to satisfy [pflag.Value] interface +func (f *UpdateMetastoreDeltaSharingScope) Type() string { + return "UpdateMetastoreDeltaSharingScope" +} + +type UpdateModelVersionRequest struct { + // The comment attached to the model version + Comment types.String `tfsdk:"comment"` + // The three-level (fully qualified) name of the model version + FullName types.String `tfsdk:"-" url:"-"` + // The integer version number of the model version + Version types.Int64 `tfsdk:"-" url:"-"` +} + +type UpdateMonitor struct { + // Name of the baseline table from which drift metrics are computed from. + // Columns in the monitored table should also be present in the baseline + // table. + BaselineTableName types.String `tfsdk:"baseline_table_name"` + // Custom metrics to compute on the monitored table. These can be aggregate + // metrics, derived metrics (from already computed aggregate metrics), or + // drift metrics (comparing metrics across time windows). + CustomMetrics []MonitorMetric `tfsdk:"custom_metrics"` + // Id of dashboard that visualizes the computed metrics. This can be empty + // if the monitor is in PENDING state. + DashboardId types.String `tfsdk:"dashboard_id"` + // The data classification config for the monitor. + DataClassificationConfig *MonitorDataClassificationConfig `tfsdk:"data_classification_config"` + // Configuration for monitoring inference logs. + InferenceLog *MonitorInferenceLog `tfsdk:"inference_log"` + // The notification settings for the monitor. + Notifications *MonitorNotifications `tfsdk:"notifications"` + // Schema where output metric tables are created. + OutputSchemaName types.String `tfsdk:"output_schema_name"` + // The schedule for automatically updating and refreshing metric tables. + Schedule *MonitorCronSchedule `tfsdk:"schedule"` + // List of column expressions to slice data with for targeted analysis. The + // data is grouped by each expression independently, resulting in a separate + // slice for each predicate and its complements. For high-cardinality + // columns, only the top 100 unique values by frequency will generate + // slices. + SlicingExprs []types.String `tfsdk:"slicing_exprs"` + // Configuration for monitoring snapshot tables. + Snapshot *MonitorSnapshot `tfsdk:"snapshot"` + // Full name of the table. + TableName types.String `tfsdk:"-" url:"-"` + // Configuration for monitoring time series tables. + TimeSeries *MonitorTimeSeries `tfsdk:"time_series"` +} + +type UpdatePermissions struct { + // Array of permissions change objects. + Changes []PermissionsChange `tfsdk:"changes"` + // Full name of securable. + FullName types.String `tfsdk:"-" url:"-"` + // Type of securable. 
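+	//
+	// A minimal, purely illustrative sketch (hypothetical names) of an
+	// UpdatePermissions value targeting a table:
+	//
+	//	req := UpdatePermissions{
+	//		FullName:      types.StringValue("main.default.my_table"),
+	//		SecurableType: SecurableTypeTable,
+	//	}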
+ SecurableType SecurableType `tfsdk:"-" url:"-"` +} + +type UpdateRegisteredModelRequest struct { + // The comment attached to the registered model + Comment types.String `tfsdk:"comment"` + // The three-level (fully qualified) name of the registered model + FullName types.String `tfsdk:"-" url:"-"` + // New name for the registered model. + NewName types.String `tfsdk:"new_name"` + // The identifier of the user who owns the registered model + Owner types.String `tfsdk:"owner"` +} + +type UpdateResponse struct { +} + +type UpdateSchema struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Whether predictive optimization should be enabled for this object and + // objects under it. + EnablePredictiveOptimization EnablePredictiveOptimization `tfsdk:"enable_predictive_optimization"` + // Full name of the schema. + FullName types.String `tfsdk:"-" url:"-"` + // New name for the schema. + NewName types.String `tfsdk:"new_name"` + // Username of current owner of schema. + Owner types.String `tfsdk:"owner"` + // A map of key-value properties attached to the securable. + Properties map[string]types.String `tfsdk:"properties"` +} + +type UpdateStorageCredential struct { + // The AWS IAM role configuration. + AwsIamRole *AwsIamRoleRequest `tfsdk:"aws_iam_role"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentityResponse `tfsdk:"azure_managed_identity"` + // The Azure service principal configuration. + AzureServicePrincipal *AzureServicePrincipal `tfsdk:"azure_service_principal"` + // The Cloudflare API token configuration. + CloudflareApiToken *CloudflareApiToken `tfsdk:"cloudflare_api_token"` + // Comment associated with the credential. + Comment types.String `tfsdk:"comment"` + // The Databricks managed GCP service account configuration. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account"` + // Force update even if there are dependent external locations or external + // tables. + Force types.Bool `tfsdk:"force"` + // Whether the current securable is accessible from all workspaces or a + // specific set of workspaces. + IsolationMode IsolationMode `tfsdk:"isolation_mode"` + // Name of the storage credential. + Name types.String `tfsdk:"-" url:"-"` + // New name for the storage credential. + NewName types.String `tfsdk:"new_name"` + // Username of current owner of credential. + Owner types.String `tfsdk:"owner"` + // Whether the storage credential is only usable for read operations. + ReadOnly types.Bool `tfsdk:"read_only"` + // Supplying true to this argument skips validation of the updated + // credential. + SkipValidation types.Bool `tfsdk:"skip_validation"` +} + +// Update a table owner. +type UpdateTableRequest struct { + // Full name of the table. + FullName types.String `tfsdk:"-" url:"-"` + + Owner types.String `tfsdk:"owner"` +} + +type UpdateVolumeRequestContent struct { + // The comment attached to the volume + Comment types.String `tfsdk:"comment"` + // The three-level (fully qualified) name of the volume + Name types.String `tfsdk:"-" url:"-"` + // New name for the volume. + NewName types.String `tfsdk:"new_name"` + // The identifier of the user who owns the volume + Owner types.String `tfsdk:"owner"` +} + +type UpdateWorkspaceBindings struct { + // A list of workspace IDs. + AssignWorkspaces []types.Int64 `tfsdk:"assign_workspaces"` + // The name of the catalog. + Name types.String `tfsdk:"-" url:"-"` + // A list of workspace IDs. 
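+	//
+	// A minimal, purely illustrative sketch (hypothetical workspace IDs) of
+	// assigning and unassigning workspaces in a single request; note that
+	// lists use go-native slices of tfsdk values:
+	//
+	//	upd := UpdateWorkspaceBindings{
+	//		Name:               types.StringValue("main"),
+	//		AssignWorkspaces:   []types.Int64{types.Int64Value(1234567890)},
+	//		UnassignWorkspaces: []types.Int64{types.Int64Value(987654321)},
+	//	}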
+ UnassignWorkspaces []types.Int64 `tfsdk:"unassign_workspaces"` +} + +type UpdateWorkspaceBindingsParameters struct { + // List of workspace bindings + Add []WorkspaceBinding `tfsdk:"add"` + // List of workspace bindings + Remove []WorkspaceBinding `tfsdk:"remove"` + // The name of the securable. + SecurableName types.String `tfsdk:"-" url:"-"` + // The type of the securable. + SecurableType types.String `tfsdk:"-" url:"-"` +} + +type ValidateStorageCredential struct { + // The AWS IAM role configuration. + AwsIamRole *AwsIamRoleRequest `tfsdk:"aws_iam_role"` + // The Azure managed identity configuration. + AzureManagedIdentity *AzureManagedIdentityRequest `tfsdk:"azure_managed_identity"` + // The Azure service principal configuration. + AzureServicePrincipal *AzureServicePrincipal `tfsdk:"azure_service_principal"` + // The Cloudflare API token configuration. + CloudflareApiToken *CloudflareApiToken `tfsdk:"cloudflare_api_token"` + // The Databricks created GCP service account configuration. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccountRequest `tfsdk:"databricks_gcp_service_account"` + // The name of an existing external location to validate. + ExternalLocationName types.String `tfsdk:"external_location_name"` + // Whether the storage credential is only usable for read operations. + ReadOnly types.Bool `tfsdk:"read_only"` + // The name of the storage credential to validate. + StorageCredentialName types.String `tfsdk:"storage_credential_name"` + // The external location url to validate. + Url types.String `tfsdk:"url"` +} + +type ValidateStorageCredentialResponse struct { + // Whether the tested location is a directory in cloud storage. + IsDir types.Bool `tfsdk:"isDir"` + // The results of the validation check. + Results []ValidationResult `tfsdk:"results"` +} + +type ValidationResult struct { + // Error message would exist when the result does not equal to **PASS**. + Message types.String `tfsdk:"message"` + // The operation tested. + Operation ValidationResultOperation `tfsdk:"operation"` + // The results of the tested operation. + Result ValidationResultResult `tfsdk:"result"` +} + +// The operation tested. +type ValidationResultOperation string + +const ValidationResultOperationDelete ValidationResultOperation = `DELETE` + +const ValidationResultOperationList ValidationResultOperation = `LIST` + +const ValidationResultOperationPathExists ValidationResultOperation = `PATH_EXISTS` + +const ValidationResultOperationRead ValidationResultOperation = `READ` + +const ValidationResultOperationWrite ValidationResultOperation = `WRITE` + +// String representation for [fmt.Print] +func (f *ValidationResultOperation) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ValidationResultOperation) Set(v string) error { + switch v { + case `DELETE`, `LIST`, `PATH_EXISTS`, `READ`, `WRITE`: + *f = ValidationResultOperation(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETE", "LIST", "PATH_EXISTS", "READ", "WRITE"`, v) + } +} + +// Type always returns ValidationResultOperation to satisfy [pflag.Value] interface +func (f *ValidationResultOperation) Type() string { + return "ValidationResultOperation" +} + +// The results of the tested operation. 
+type ValidationResultResult string + +const ValidationResultResultFail ValidationResultResult = `FAIL` + +const ValidationResultResultPass ValidationResultResult = `PASS` + +const ValidationResultResultSkip ValidationResultResult = `SKIP` + +// String representation for [fmt.Print] +func (f *ValidationResultResult) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ValidationResultResult) Set(v string) error { + switch v { + case `FAIL`, `PASS`, `SKIP`: + *f = ValidationResultResult(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAIL", "PASS", "SKIP"`, v) + } +} + +// Type always returns ValidationResultResult to satisfy [pflag.Value] interface +func (f *ValidationResultResult) Type() string { + return "ValidationResultResult" +} + +type VolumeInfo struct { + // The AWS access point to use when accesing s3 for this external location. + AccessPoint types.String `tfsdk:"access_point"` + // Indicates whether the principal is limited to retrieving metadata for the + // associated object through the BROWSE privilege when include_browse is + // enabled in the request. + BrowseOnly types.Bool `tfsdk:"browse_only"` + // The name of the catalog where the schema and the volume are + CatalogName types.String `tfsdk:"catalog_name"` + // The comment attached to the volume + Comment types.String `tfsdk:"comment"` + + CreatedAt types.Int64 `tfsdk:"created_at"` + // The identifier of the user who created the volume + CreatedBy types.String `tfsdk:"created_by"` + // Encryption options that apply to clients connecting to cloud storage. + EncryptionDetails *EncryptionDetails `tfsdk:"encryption_details"` + // The three-level (fully qualified) name of the volume + FullName types.String `tfsdk:"full_name"` + // The unique identifier of the metastore + MetastoreId types.String `tfsdk:"metastore_id"` + // The name of the volume + Name types.String `tfsdk:"name"` + // The identifier of the user who owns the volume + Owner types.String `tfsdk:"owner"` + // The name of the schema where the volume is + SchemaName types.String `tfsdk:"schema_name"` + // The storage location on the cloud + StorageLocation types.String `tfsdk:"storage_location"` + + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // The identifier of the user who updated the volume last time + UpdatedBy types.String `tfsdk:"updated_by"` + // The unique identifier of the volume + VolumeId types.String `tfsdk:"volume_id"` + + VolumeType VolumeType `tfsdk:"volume_type"` +} + +type VolumeType string + +const VolumeTypeExternal VolumeType = `EXTERNAL` + +const VolumeTypeManaged VolumeType = `MANAGED` + +// String representation for [fmt.Print] +func (f *VolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *VolumeType) Set(v string) error { + switch v { + case `EXTERNAL`, `MANAGED`: + *f = VolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EXTERNAL", "MANAGED"`, v) + } +} + +// Type always returns VolumeType to satisfy [pflag.Value] interface +func (f *VolumeType) Type() string { + return "VolumeType" +} + +type WorkspaceBinding struct { + BindingType WorkspaceBindingBindingType `tfsdk:"binding_type"` + + WorkspaceId types.Int64 `tfsdk:"workspace_id"` +} + +type WorkspaceBindingBindingType string + +const WorkspaceBindingBindingTypeBindingTypeReadOnly WorkspaceBindingBindingType = `BINDING_TYPE_READ_ONLY` + +const WorkspaceBindingBindingTypeBindingTypeReadWrite 
WorkspaceBindingBindingType = `BINDING_TYPE_READ_WRITE`
+
+// String representation for [fmt.Print]
+func (f *WorkspaceBindingBindingType) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *WorkspaceBindingBindingType) Set(v string) error {
+	switch v {
+	case `BINDING_TYPE_READ_ONLY`, `BINDING_TYPE_READ_WRITE`:
+		*f = WorkspaceBindingBindingType(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "BINDING_TYPE_READ_ONLY", "BINDING_TYPE_READ_WRITE"`, v)
+	}
+}
+
+// Type always returns WorkspaceBindingBindingType to satisfy [pflag.Value] interface
+func (f *WorkspaceBindingBindingType) Type() string {
+	return "WorkspaceBindingBindingType"
+}
+
+// Currently assigned workspace bindings
+type WorkspaceBindingsResponse struct {
+	// List of workspace bindings
+	Bindings []WorkspaceBinding `tfsdk:"bindings"`
+}
diff --git a/service/compute_tf/model.go b/service/compute_tf/model.go
new file mode 100755
index 0000000000..7546fbbc88
--- /dev/null
+++ b/service/compute_tf/model.go
@@ -0,0 +1,3936 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+/*
+These generated types are for terraform plugin framework to interact with the terraform state conveniently.
+
+These types follow the same structure as the types in go-sdk.
+The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types.
+Plus the json tags get converted into tfsdk tags.
+We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types.
+*/
+
+package compute_tf
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+type AddInstanceProfile struct {
+	// The AWS IAM role ARN of the role associated with the instance profile.
+	// This field is required if your role name and instance profile name do not
+	// match and you want to use the instance profile with [Databricks SQL
+	// Serverless].
+	//
+	// Otherwise, this field is optional.
+	//
+	// [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html
+	IamRoleArn types.String `tfsdk:"iam_role_arn"`
+	// The AWS ARN of the instance profile to register with Databricks. This
+	// field is required.
+	InstanceProfileArn types.String `tfsdk:"instance_profile_arn"`
+	// Boolean flag indicating whether the instance profile should only be used
+	// in credential passthrough scenarios. If true, it means the instance
+	// profile contains a meta IAM role which could assume a wide range of
+	// roles. Therefore it should always be used with authorization. This field
+	// is optional, the default value is `false`.
+	IsMetaInstanceProfile types.Bool `tfsdk:"is_meta_instance_profile"`
+	// By default, Databricks validates that it has sufficient permissions to
+	// launch instances with the instance profile. This validation uses AWS
+	// dry-run mode for the RunInstances API. If validation fails with an error
+	// message that does not indicate an IAM related permission issue, (e.g.
+	// “Your requested instance type is not supported in your requested
+	// availability zone”), you can pass this flag to skip the validation and
+	// forcibly add the instance profile.
+	SkipValidation types.Bool `tfsdk:"skip_validation"`
+}
+
+type AddResponse struct {
+}
+
+type Adlsgen2Info struct {
+	// abfss destination, e.g.
+	// `abfss://<container-name>@<storage-account-name>.dfs.core.windows.net/<directory-name>`.
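+	//
+	// A minimal, purely illustrative value (hypothetical storage account,
+	// container and path):
+	//
+	//	dst := Adlsgen2Info{
+	//		Destination: types.StringValue("abfss://logs@myaccount.dfs.core.windows.net/cluster-logs"),
+	//	}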
+ Destination types.String `tfsdk:"destination"` +} + +type AutoScale struct { + // The maximum number of workers to which the cluster can scale up when + // overloaded. Note that `max_workers` must be strictly greater than + // `min_workers`. + MaxWorkers types.Int64 `tfsdk:"max_workers"` + // The minimum number of workers to which the cluster can scale down when + // underutilized. It is also the initial number of workers the cluster will + // have after creation. + MinWorkers types.Int64 `tfsdk:"min_workers"` +} + +type AwsAttributes struct { + // Availability type used for all subsequent nodes past the + // `first_on_demand` ones. + // + // Note: If `first_on_demand` is zero, this availability type will be used + // for the entire cluster. + Availability AwsAvailability `tfsdk:"availability"` + // The number of volumes launched for each instance. Users can choose up to + // 10 volumes. This feature is only enabled for supported node types. Legacy + // node types cannot specify custom EBS volumes. For node types with no + // instance store, at least one EBS volume needs to be specified; otherwise, + // cluster creation will fail. + // + // These EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc. Instance + // store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc. + // + // If EBS volumes are attached, Databricks will configure Spark to use only + // the EBS volumes for scratch storage because heterogenously sized scratch + // devices can lead to inefficient disk utilization. If no EBS volumes are + // attached, Databricks will configure Spark to use instance store volumes. + // + // Please note that if EBS volumes are specified, then the Spark + // configuration `spark.local.dir` will be overridden. + EbsVolumeCount types.Int64 `tfsdk:"ebs_volume_count"` + // If using gp3 volumes, what IOPS to use for the disk. If this is not set, + // the maximum performance of a gp2 volume with the same volume size will be + // used. + EbsVolumeIops types.Int64 `tfsdk:"ebs_volume_iops"` + // The size of each EBS volume (in GiB) launched for each instance. For + // general purpose SSD, this value must be within the range 100 - 4096. For + // throughput optimized HDD, this value must be within the range 500 - 4096. + EbsVolumeSize types.Int64 `tfsdk:"ebs_volume_size"` + // If using gp3 volumes, what throughput to use for the disk. If this is not + // set, the maximum performance of a gp2 volume with the same volume size + // will be used. + EbsVolumeThroughput types.Int64 `tfsdk:"ebs_volume_throughput"` + // The type of EBS volumes that will be launched with this cluster. + EbsVolumeType EbsVolumeType `tfsdk:"ebs_volume_type"` + // The first `first_on_demand` nodes of the cluster will be placed on + // on-demand instances. If this value is greater than 0, the cluster driver + // node in particular will be placed on an on-demand instance. If this value + // is greater than or equal to the current cluster size, all nodes will be + // placed on on-demand instances. If this value is less than the current + // cluster size, `first_on_demand` nodes will be placed on on-demand + // instances and the remainder will be placed on `availability` instances. + // Note that this value does not affect cluster size and cannot currently be + // mutated over the lifetime of a cluster. + FirstOnDemand types.Int64 `tfsdk:"first_on_demand"` + // Nodes for this cluster will only be placed on AWS instances with this + // instance profile. 
If omitted, nodes will be placed on instances without
+	// an IAM instance profile. The instance profile must have previously been
+	// added to the Databricks environment by an account administrator.
+	//
+	// This feature may only be available to certain customer plans.
+	//
+	// If this field is omitted, we will pull in the default from the conf if
+	// it exists.
+	InstanceProfileArn types.String `tfsdk:"instance_profile_arn"`
+	// The bid price for AWS spot instances, as a percentage of the
+	// corresponding instance type's on-demand price. For example, if this field
+	// is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then
+	// the bid price is half of the price of on-demand `r3.xlarge` instances.
+	// Similarly, if this field is set to 200, the bid price is twice the price
+	// of on-demand `r3.xlarge` instances. If not specified, the default value
+	// is 100. When spot instances are requested for this cluster, only spot
+	// instances whose bid price percentage matches this field will be
+	// considered. Note that, for safety, we enforce this field to be no more
+	// than 10000.
+	//
+	// The default value and documentation here should be kept consistent with
+	// CommonConf.defaultSpotBidPricePercent and
+	// CommonConf.maxSpotBidPricePercent.
+	SpotBidPricePercent types.Int64 `tfsdk:"spot_bid_price_percent"`
+	// Identifier for the availability zone/datacenter in which the cluster
+	// resides. This string will be of a form like "us-west-2a". The provided
+	// availability zone must be in the same region as the Databricks
+	// deployment. For example, "us-west-2a" is not a valid zone id if the
+	// Databricks deployment resides in the "us-east-1" region. This is an
+	// optional field at cluster creation, and if not specified, a default zone
+	// will be used. If the zone specified is "auto", will try to place cluster
+	// in a zone with high availability, and will retry placement in a different
+	// AZ if there is not enough capacity. The list of available zones as well
+	// as the default value can be found by using the `List Zones` method.
+	ZoneId types.String `tfsdk:"zone_id"`
+}
+
+// Availability type used for all subsequent nodes past the `first_on_demand`
+// ones.
+//
+// Note: If `first_on_demand` is zero, this availability type will be used for
+// the entire cluster.
+type AwsAvailability string
+
+const AwsAvailabilityOnDemand AwsAvailability = `ON_DEMAND`
+
+const AwsAvailabilitySpot AwsAvailability = `SPOT`
+
+const AwsAvailabilitySpotWithFallback AwsAvailability = `SPOT_WITH_FALLBACK`
+
+// String representation for [fmt.Print]
+func (f *AwsAvailability) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *AwsAvailability) Set(v string) error {
+	switch v {
+	case `ON_DEMAND`, `SPOT`, `SPOT_WITH_FALLBACK`:
+		*f = AwsAvailability(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "ON_DEMAND", "SPOT", "SPOT_WITH_FALLBACK"`, v)
+	}
+}
+
+// Type always returns AwsAvailability to satisfy [pflag.Value] interface
+func (f *AwsAvailability) Type() string {
+	return "AwsAvailability"
+}
+
+type AzureAttributes struct {
+	// Availability type used for all subsequent nodes past the
+	// `first_on_demand` ones. Note: If `first_on_demand` is zero (which only
+	// happens on pool clusters), this availability type will be used for the
+	// entire cluster.
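+	//
+	// A minimal, purely illustrative sketch (hypothetical values) of a
+	// spot-backed configuration using this field together with
+	// first_on_demand and spot_bid_max_price:
+	//
+	//	attrs := AzureAttributes{
+	//		Availability:    AzureAvailabilitySpotWithFallbackAzure,
+	//		FirstOnDemand:   types.Int64Value(1),
+	//		SpotBidMaxPrice: types.Float64Value(-1),
+	//	}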
+ Availability AzureAvailability `tfsdk:"availability"` + // The first `first_on_demand` nodes of the cluster will be placed on + // on-demand instances. This value should be greater than 0, to make sure + // the cluster driver node is placed on an on-demand instance. If this value + // is greater than or equal to the current cluster size, all nodes will be + // placed on on-demand instances. If this value is less than the current + // cluster size, `first_on_demand` nodes will be placed on on-demand + // instances and the remainder will be placed on `availability` instances. + // Note that this value does not affect cluster size and cannot currently be + // mutated over the lifetime of a cluster. + FirstOnDemand types.Int64 `tfsdk:"first_on_demand"` + // Defines values necessary to configure and run Azure Log Analytics agent + LogAnalyticsInfo *LogAnalyticsInfo `tfsdk:"log_analytics_info"` + // The max bid price to be used for Azure spot instances. The Max price for + // the bid cannot be higher than the on-demand price of the instance. If not + // specified, the default value is -1, which specifies that the instance + // cannot be evicted on the basis of price, and only on the basis of + // availability. Further, the value should > 0 or -1. + SpotBidMaxPrice types.Float64 `tfsdk:"spot_bid_max_price"` +} + +// Availability type used for all subsequent nodes past the `first_on_demand` +// ones. Note: If `first_on_demand` is zero (which only happens on pool +// clusters), this availability type will be used for the entire cluster. +type AzureAvailability string + +const AzureAvailabilityOnDemandAzure AzureAvailability = `ON_DEMAND_AZURE` + +const AzureAvailabilitySpotAzure AzureAvailability = `SPOT_AZURE` + +const AzureAvailabilitySpotWithFallbackAzure AzureAvailability = `SPOT_WITH_FALLBACK_AZURE` + +// String representation for [fmt.Print] +func (f *AzureAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AzureAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_AZURE`, `SPOT_AZURE`, `SPOT_WITH_FALLBACK_AZURE`: + *f = AzureAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_AZURE", "SPOT_AZURE", "SPOT_WITH_FALLBACK_AZURE"`, v) + } +} + +// Type always returns AzureAvailability to satisfy [pflag.Value] interface +func (f *AzureAvailability) Type() string { + return "AzureAvailability" +} + +type CancelCommand struct { + ClusterId types.String `tfsdk:"clusterId"` + + CommandId types.String `tfsdk:"commandId"` + + ContextId types.String `tfsdk:"contextId"` +} + +type CancelResponse struct { +} + +type ChangeClusterOwner struct { + // + ClusterId types.String `tfsdk:"cluster_id"` + // New owner of the cluster_id after this RPC. + OwnerUsername types.String `tfsdk:"owner_username"` +} + +type ChangeClusterOwnerResponse struct { +} + +type ClientsTypes struct { + // With jobs set, the cluster can be used for jobs + Jobs types.Bool `tfsdk:"jobs"` + // With notebooks set, this cluster can be used for notebooks + Notebooks types.Bool `tfsdk:"notebooks"` +} + +type CloneCluster struct { + // The cluster that is being cloned. 
+ SourceClusterId types.String `tfsdk:"source_cluster_id"` +} + +type CloudProviderNodeInfo struct { + Status []CloudProviderNodeStatus `tfsdk:"status"` +} + +type CloudProviderNodeStatus string + +const CloudProviderNodeStatusNotAvailableInRegion CloudProviderNodeStatus = `NotAvailableInRegion` + +const CloudProviderNodeStatusNotEnabledOnSubscription CloudProviderNodeStatus = `NotEnabledOnSubscription` + +// String representation for [fmt.Print] +func (f *CloudProviderNodeStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CloudProviderNodeStatus) Set(v string) error { + switch v { + case `NotAvailableInRegion`, `NotEnabledOnSubscription`: + *f = CloudProviderNodeStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NotAvailableInRegion", "NotEnabledOnSubscription"`, v) + } +} + +// Type always returns CloudProviderNodeStatus to satisfy [pflag.Value] interface +func (f *CloudProviderNodeStatus) Type() string { + return "CloudProviderNodeStatus" +} + +type ClusterAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel ClusterPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ClusterAccessControlResponse struct { + // All permissions. + AllPermissions []ClusterPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ClusterAttributes struct { + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `tfsdk:"aws_attributes"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `tfsdk:"azure_attributes"` + // The configuration for delivering spark logs to a long-term storage + // destination. Two kinds of destinations (dbfs and s3) are supported. Only + // one destination can be specified for one cluster. If the conf is given, + // the logs will be delivered to the destination every `5 mins`. The + // destination of driver logs is `$destination/$clusterId/driver`, while the + // destination of executor logs is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName types.String `tfsdk:"cluster_name"` + // Additional tags for cluster resources. 
Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // * `NONE`: No security isolation for multiple users sharing the cluster. + // Data governance features are not available in this mode. * `SINGLE_USER`: + // A secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. + // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. * + // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy + // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This + // mode provides a way that doesn’t have UC nor passthrough enabled. + DataSecurityMode DataSecurityMode `tfsdk:"data_security_mode"` + + DockerImage *DockerImage `tfsdk:"docker_image"` + // The optional ID of the instance pool for the driver of the cluster + // belongs. The pool cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId types.String `tfsdk:"driver_instance_pool_id"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId types.String `tfsdk:"driver_node_type_id"` + // Autoscaling Local Storage: when enabled, this cluster will dynamically + // acquire additional disk space when its Spark workers are running low on + // disk space. This feature requires specific AWS permissions to function + // correctly - refer to the User Guide for more details. + EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk"` + // Whether to enable LUKS on cluster VMs' local disks + EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `tfsdk:"init_scripts"` + // The optional ID of the instance pool to which the cluster belongs. 
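+	//
+	// A minimal, purely illustrative ClusterAttributes sketch (hypothetical
+	// values; the optional instance pool ID is left unset):
+	//
+	//	attrs := ClusterAttributes{
+	//		ClusterName:  types.StringValue("shared-etl"),
+	//		SparkVersion: types.StringValue("14.3.x-scala2.12"),
+	//		NodeTypeId:   types.StringValue("i3.xlarge"),
+	//	}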
+ InstancePoolId types.String `tfsdk:"instance_pool_id"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId types.String `tfsdk:"policy_id"` + // Decides which runtime engine to be use, e.g. Standard vs. Photon. If + // unspecified, the runtime engine is inferred from spark_version. + RuntimeEngine RuntimeEngine `tfsdk:"runtime_engine"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName types.String `tfsdk:"single_user_name"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]types.String `tfsdk:"spark_conf"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]types.String `tfsdk:"spark_env_vars"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion types.String `tfsdk:"spark_version"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []types.String `tfsdk:"ssh_public_keys"` + + WorkloadType *WorkloadType `tfsdk:"workload_type"` +} + +type ClusterDetails struct { + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `tfsdk:"autoscale"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `tfsdk:"aws_attributes"` + // Attributes related to clusters running on Microsoft Azure. 
If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `tfsdk:"azure_attributes"` + // Number of CPU cores available for this cluster. Note that this can be + // fractional, e.g. 7.5 cores, since certain node types are configured to + // share cores between Spark nodes on the same instance. + ClusterCores types.Float64 `tfsdk:"cluster_cores"` + // Canonical identifier for the cluster. This id is retained during cluster + // restarts and resizes, while each new cluster has a globally unique id. + ClusterId types.String `tfsdk:"cluster_id"` + // The configuration for delivering spark logs to a long-term storage + // destination. Two kinds of destinations (dbfs and s3) are supported. Only + // one destination can be specified for one cluster. If the conf is given, + // the logs will be delivered to the destination every `5 mins`. The + // destination of driver logs is `$destination/$clusterId/driver`, while the + // destination of executor logs is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf"` + // Cluster log delivery status. + ClusterLogStatus *LogSyncStatus `tfsdk:"cluster_log_status"` + // Total amount of cluster memory, in megabytes + ClusterMemoryMb types.Int64 `tfsdk:"cluster_memory_mb"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName types.String `tfsdk:"cluster_name"` + // Determines whether the cluster was created by a user through the UI, + // created by the Databricks Jobs Scheduler, or through an API request. This + // is the same as cluster_creator, but read only. + ClusterSource ClusterSource `tfsdk:"cluster_source"` + // Creator user name. The field won't be included in the response if the + // user has already been deleted. + CreatorUserName types.String `tfsdk:"creator_user_name"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // * `NONE`: No security isolation for multiple users sharing the cluster. + // Data governance features are not available in this mode. * `SINGLE_USER`: + // A secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. + // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. 
*
+ // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy
+ // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This
+ // mode provides a way that doesn’t have UC nor passthrough enabled.
+ DataSecurityMode DataSecurityMode `tfsdk:"data_security_mode"`
+ // Tags that are added by Databricks regardless of any `custom_tags`,
+ // including:
+ //
+ // - Vendor: Databricks
+ //
+ // - Creator:
+ //
+ // - ClusterName:
+ //
+ // - ClusterId:
+ //
+ // - Name:
+ DefaultTags map[string]types.String `tfsdk:"default_tags"`
+
+ DockerImage *DockerImage `tfsdk:"docker_image"`
+ // Node on which the Spark driver resides. The driver node contains the
+ // Spark master and the Databricks application that manages the per-notebook
+ // Spark REPLs.
+ Driver *SparkNode `tfsdk:"driver"`
+ // The optional ID of the instance pool for the driver of the cluster
+ // belongs. The pool cluster uses the instance pool with id
+ // (instance_pool_id) if the driver pool is not assigned.
+ DriverInstancePoolId types.String `tfsdk:"driver_instance_pool_id"`
+ // The node type of the Spark driver. Note that this field is optional; if
+ // unset, the driver node type will be set as the same value as
+ // `node_type_id` defined above.
+ DriverNodeTypeId types.String `tfsdk:"driver_node_type_id"`
+ // Autoscaling Local Storage: when enabled, this cluster will dynamically
+ // acquire additional disk space when its Spark workers are running low on
+ // disk space. This feature requires specific AWS permissions to function
+ // correctly - refer to the User Guide for more details.
+ EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk"`
+ // Whether to enable LUKS on cluster VMs' local disks
+ EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption"`
+ // Nodes on which the Spark executors reside.
+ Executors []SparkNode `tfsdk:"executors"`
+ // Attributes related to clusters running on Google Cloud Platform. If not
+ // specified at cluster creation, a set of default values will be used.
+ GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes"`
+ // The configuration for storing init scripts. Any number of destinations
+ // can be specified. The scripts are executed sequentially in the order
+ // provided. If `cluster_log_conf` is specified, init script logs are sent
+ // to `//init_scripts`.
+ InitScripts []InitScriptInfo `tfsdk:"init_scripts"`
+ // The optional ID of the instance pool to which the cluster belongs.
+ InstancePoolId types.String `tfsdk:"instance_pool_id"`
+ // Port on which Spark JDBC server is listening, in the driver node. No
+ // service will be listening on this port in executor nodes.
+ JdbcPort types.Int64 `tfsdk:"jdbc_port"`
+ // the timestamp that the cluster was started/restarted
+ LastRestartedTime types.Int64 `tfsdk:"last_restarted_time"`
+ // Time when the cluster driver last lost its state (due to a restart or
+ // driver failure).
+ LastStateLossTime types.Int64 `tfsdk:"last_state_loss_time"`
+ // This field encodes, through a single value, the resources available to
+ // each of the Spark nodes in this cluster. For example, the Spark nodes can
+ // be provisioned and optimized for memory or compute intensive workloads. A
+ // list of available node types can be retrieved by using the
+ // :method:clusters/listNodeTypes API call.
+ NodeTypeId types.String `tfsdk:"node_type_id"`
+ // Number of worker nodes that this cluster should have.
A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers types.Int64 `tfsdk:"num_workers"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId types.String `tfsdk:"policy_id"` + // Decides which runtime engine to be use, e.g. Standard vs. Photon. If + // unspecified, the runtime engine is inferred from spark_version. + RuntimeEngine RuntimeEngine `tfsdk:"runtime_engine"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName types.String `tfsdk:"single_user_name"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]types.String `tfsdk:"spark_conf"` + // A canonical SparkContext identifier. This value *does* change when the + // Spark driver restarts. The pair `(cluster_id, spark_context_id)` is a + // globally unique identifier over all Spark contexts. + SparkContextId types.Int64 `tfsdk:"spark_context_id"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]types.String `tfsdk:"spark_env_vars"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion types.String `tfsdk:"spark_version"` + // `spec` contains a snapshot of the field values that were used to create + // or edit this cluster. The contents of `spec` can be used in the body of a + // create cluster request. This field might not be populated for older + // clusters. Note: not included in the response of the ListClusters API. + Spec *ClusterSpec `tfsdk:"spec"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []types.String `tfsdk:"ssh_public_keys"` + // Time (in epoch milliseconds) when the cluster creation request was + // received (when the cluster entered a `PENDING` state). + StartTime types.Int64 `tfsdk:"start_time"` + // Current state of the cluster. 
+ State State `tfsdk:"state"` + // A message associated with the most recent state transition (e.g., the + // reason why the cluster entered a `TERMINATED` state). + StateMessage types.String `tfsdk:"state_message"` + // Time (in epoch milliseconds) when the cluster was terminated, if + // applicable. + TerminatedTime types.Int64 `tfsdk:"terminated_time"` + // Information about why the cluster was terminated. This field only appears + // when the cluster is in a `TERMINATING` or `TERMINATED` state. + TerminationReason *TerminationReason `tfsdk:"termination_reason"` + + WorkloadType *WorkloadType `tfsdk:"workload_type"` +} + +type ClusterEvent struct { + // + ClusterId types.String `tfsdk:"cluster_id"` + // + DataPlaneEventDetails *DataPlaneEventDetails `tfsdk:"data_plane_event_details"` + // + Details *EventDetails `tfsdk:"details"` + // The timestamp when the event occurred, stored as the number of + // milliseconds since the Unix epoch. If not provided, this will be assigned + // by the Timeline service. + Timestamp types.Int64 `tfsdk:"timestamp"` + + Type EventType `tfsdk:"type"` +} + +type ClusterLibraryStatuses struct { + // Unique identifier for the cluster. + ClusterId types.String `tfsdk:"cluster_id"` + // Status of all libraries on the cluster. + LibraryStatuses []LibraryFullStatus `tfsdk:"library_statuses"` +} + +type ClusterLogConf struct { + // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : + // "dbfs:/home/cluster_log" } }` + Dbfs *DbfsStorageInfo `tfsdk:"dbfs"` + // destination and either the region or endpoint need to be provided. e.g. + // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : + // "us-west-2" } }` Cluster iam role is used to access s3, please make sure + // the cluster iam role in `instance_profile_arn` has permission to write + // data to the s3 destination. 
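+ //
+ // Illustrative sketch (not part of the generated output): one way a caller
+ // might populate this field with terraform-plugin-framework values, assuming
+ // the S3StorageInfo type generated elsewhere in this file exposes
+ // Destination and Region as types.String; the destination and region shown
+ // are the example values from the comment above:
+ //
+ //	conf := ClusterLogConf{
+ //		S3: &S3StorageInfo{
+ //			Destination: types.StringValue("s3://cluster_log_bucket/prefix"),
+ //			Region:      types.StringValue("us-west-2"),
+ //		},
+ //	}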
+ S3 *S3StorageInfo `tfsdk:"s3"` +} + +type ClusterPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel ClusterPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type ClusterPermissionLevel string + +const ClusterPermissionLevelCanAttachTo ClusterPermissionLevel = `CAN_ATTACH_TO` + +const ClusterPermissionLevelCanManage ClusterPermissionLevel = `CAN_MANAGE` + +const ClusterPermissionLevelCanRestart ClusterPermissionLevel = `CAN_RESTART` + +// String representation for [fmt.Print] +func (f *ClusterPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterPermissionLevel) Set(v string) error { + switch v { + case `CAN_ATTACH_TO`, `CAN_MANAGE`, `CAN_RESTART`: + *f = ClusterPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_ATTACH_TO", "CAN_MANAGE", "CAN_RESTART"`, v) + } +} + +// Type always returns ClusterPermissionLevel to satisfy [pflag.Value] interface +func (f *ClusterPermissionLevel) Type() string { + return "ClusterPermissionLevel" +} + +type ClusterPermissions struct { + AccessControlList []ClusterAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type ClusterPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel ClusterPermissionLevel `tfsdk:"permission_level"` +} + +type ClusterPermissionsRequest struct { + AccessControlList []ClusterAccessControlRequest `tfsdk:"access_control_list"` + // The cluster for which to get or manage permissions. + ClusterId types.String `tfsdk:"-" url:"-"` +} + +type ClusterPolicyAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel ClusterPolicyPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ClusterPolicyAccessControlResponse struct { + // All permissions. + AllPermissions []ClusterPolicyPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. 
+ ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ClusterPolicyPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel ClusterPolicyPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type ClusterPolicyPermissionLevel string + +const ClusterPolicyPermissionLevelCanUse ClusterPolicyPermissionLevel = `CAN_USE` + +// String representation for [fmt.Print] +func (f *ClusterPolicyPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterPolicyPermissionLevel) Set(v string) error { + switch v { + case `CAN_USE`: + *f = ClusterPolicyPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_USE"`, v) + } +} + +// Type always returns ClusterPolicyPermissionLevel to satisfy [pflag.Value] interface +func (f *ClusterPolicyPermissionLevel) Type() string { + return "ClusterPolicyPermissionLevel" +} + +type ClusterPolicyPermissions struct { + AccessControlList []ClusterPolicyAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type ClusterPolicyPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel ClusterPolicyPermissionLevel `tfsdk:"permission_level"` +} + +type ClusterPolicyPermissionsRequest struct { + AccessControlList []ClusterPolicyAccessControlRequest `tfsdk:"access_control_list"` + // The cluster policy for which to get or manage permissions. + ClusterPolicyId types.String `tfsdk:"-" url:"-"` +} + +type ClusterSize struct { + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `tfsdk:"autoscale"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers types.Int64 `tfsdk:"num_workers"` +} + +// Determines whether the cluster was created by a user through the UI, created +// by the Databricks Jobs Scheduler, or through an API request. This is the same +// as cluster_creator, but read only. 
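+//
+// Illustrative sketch (not part of the generated output): every enum type in
+// this file follows the same String/Set/Type pattern, so a raw string can be
+// validated before use, e.g.:
+//
+//	var src ClusterSource
+//	if err := src.Set("UI"); err != nil {
+//		// only "API", "JOB", "MODELS", "PIPELINE", "PIPELINE_MAINTENANCE",
+//		// "SQL" and "UI" are accepted; anything else returns an error.
+//	}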
+type ClusterSource string + +const ClusterSourceApi ClusterSource = `API` + +const ClusterSourceJob ClusterSource = `JOB` + +const ClusterSourceModels ClusterSource = `MODELS` + +const ClusterSourcePipeline ClusterSource = `PIPELINE` + +const ClusterSourcePipelineMaintenance ClusterSource = `PIPELINE_MAINTENANCE` + +const ClusterSourceSql ClusterSource = `SQL` + +const ClusterSourceUi ClusterSource = `UI` + +// String representation for [fmt.Print] +func (f *ClusterSource) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterSource) Set(v string) error { + switch v { + case `API`, `JOB`, `MODELS`, `PIPELINE`, `PIPELINE_MAINTENANCE`, `SQL`, `UI`: + *f = ClusterSource(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "API", "JOB", "MODELS", "PIPELINE", "PIPELINE_MAINTENANCE", "SQL", "UI"`, v) + } +} + +// Type always returns ClusterSource to satisfy [pflag.Value] interface +func (f *ClusterSource) Type() string { + return "ClusterSource" +} + +type ClusterSpec struct { + // When set to true, fixed and default values from the policy will be used + // for fields that are omitted. When set to false, only fixed values from + // the policy will be applied. + ApplyPolicyDefaultValues types.Bool `tfsdk:"apply_policy_default_values"` + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `tfsdk:"autoscale"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. + AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `tfsdk:"aws_attributes"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `tfsdk:"azure_attributes"` + // The configuration for delivering spark logs to a long-term storage + // destination. Two kinds of destinations (dbfs and s3) are supported. Only + // one destination can be specified for one cluster. If the conf is given, + // the logs will be delivered to the destination every `5 mins`. The + // destination of driver logs is `$destination/$clusterId/driver`, while the + // destination of executor logs is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName types.String `tfsdk:"cluster_name"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. 
Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // * `NONE`: No security isolation for multiple users sharing the cluster. + // Data governance features are not available in this mode. * `SINGLE_USER`: + // A secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. + // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. * + // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy + // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This + // mode provides a way that doesn’t have UC nor passthrough enabled. + DataSecurityMode DataSecurityMode `tfsdk:"data_security_mode"` + + DockerImage *DockerImage `tfsdk:"docker_image"` + // The optional ID of the instance pool for the driver of the cluster + // belongs. The pool cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId types.String `tfsdk:"driver_instance_pool_id"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId types.String `tfsdk:"driver_node_type_id"` + // Autoscaling Local Storage: when enabled, this cluster will dynamically + // acquire additional disk space when its Spark workers are running low on + // disk space. This feature requires specific AWS permissions to function + // correctly - refer to the User Guide for more details. + EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk"` + // Whether to enable LUKS on cluster VMs' local disks + EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `tfsdk:"init_scripts"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId types.String `tfsdk:"instance_pool_id"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. 
For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers types.Int64 `tfsdk:"num_workers"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId types.String `tfsdk:"policy_id"` + // Decides which runtime engine to be use, e.g. Standard vs. Photon. If + // unspecified, the runtime engine is inferred from spark_version. + RuntimeEngine RuntimeEngine `tfsdk:"runtime_engine"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName types.String `tfsdk:"single_user_name"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]types.String `tfsdk:"spark_conf"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]types.String `tfsdk:"spark_env_vars"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion types.String `tfsdk:"spark_version"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []types.String `tfsdk:"ssh_public_keys"` + + WorkloadType *WorkloadType `tfsdk:"workload_type"` +} + +// Get status +type ClusterStatus struct { + // Unique identifier of the cluster whose status should be retrieved. 
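+ //
+ // Illustrative note (not part of the generated output): a field tagged
+ // `tfsdk:"-"` is excluded from Terraform state, while the `url` tag is
+ // presumably what drives query-string encoding when the request is sent,
+ // e.g. (placeholder cluster ID):
+ //
+ //	req := ClusterStatus{ClusterId: types.StringValue("1234-567890-abc123")}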
+ ClusterId types.String `tfsdk:"-" url:"cluster_id"` +} + +type Command struct { + // Running cluster id + ClusterId types.String `tfsdk:"clusterId"` + // Executable code + Command types.String `tfsdk:"command"` + // Running context id + ContextId types.String `tfsdk:"contextId"` + + Language Language `tfsdk:"language"` +} + +type CommandStatus string + +const CommandStatusCancelled CommandStatus = `Cancelled` + +const CommandStatusCancelling CommandStatus = `Cancelling` + +const CommandStatusError CommandStatus = `Error` + +const CommandStatusFinished CommandStatus = `Finished` + +const CommandStatusQueued CommandStatus = `Queued` + +const CommandStatusRunning CommandStatus = `Running` + +// String representation for [fmt.Print] +func (f *CommandStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CommandStatus) Set(v string) error { + switch v { + case `Cancelled`, `Cancelling`, `Error`, `Finished`, `Queued`, `Running`: + *f = CommandStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Cancelled", "Cancelling", "Error", "Finished", "Queued", "Running"`, v) + } +} + +// Type always returns CommandStatus to satisfy [pflag.Value] interface +func (f *CommandStatus) Type() string { + return "CommandStatus" +} + +// Get command info +type CommandStatusRequest struct { + ClusterId types.String `tfsdk:"-" url:"clusterId"` + + CommandId types.String `tfsdk:"-" url:"commandId"` + + ContextId types.String `tfsdk:"-" url:"contextId"` +} + +type CommandStatusResponse struct { + Id types.String `tfsdk:"id"` + + Results *Results `tfsdk:"results"` + + Status CommandStatus `tfsdk:"status"` +} + +type ContextStatus string + +const ContextStatusError ContextStatus = `Error` + +const ContextStatusPending ContextStatus = `Pending` + +const ContextStatusRunning ContextStatus = `Running` + +// String representation for [fmt.Print] +func (f *ContextStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ContextStatus) Set(v string) error { + switch v { + case `Error`, `Pending`, `Running`: + *f = ContextStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Error", "Pending", "Running"`, v) + } +} + +// Type always returns ContextStatus to satisfy [pflag.Value] interface +func (f *ContextStatus) Type() string { + return "ContextStatus" +} + +// Get status +type ContextStatusRequest struct { + ClusterId types.String `tfsdk:"-" url:"clusterId"` + + ContextId types.String `tfsdk:"-" url:"contextId"` +} + +type ContextStatusResponse struct { + Id types.String `tfsdk:"id"` + + Status ContextStatus `tfsdk:"status"` +} + +type CreateCluster struct { + // When set to true, fixed and default values from the policy will be used + // for fields that are omitted. When set to false, only fixed values from + // the policy will be applied. + ApplyPolicyDefaultValues types.Bool `tfsdk:"apply_policy_default_values"` + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `tfsdk:"autoscale"` + // Automatically terminates the cluster after it is inactive for this time + // in minutes. If not set, this cluster will not be automatically + // terminated. If specified, the threshold must be between 10 and 10000 + // minutes. Users can also set this value to 0 to explicitly disable + // automatic termination. 
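+ //
+ // Illustrative sketch (not part of the generated output): a minimal
+ // CreateCluster request built with terraform-plugin-framework values; the
+ // cluster name, node type and Spark version are placeholder example values:
+ //
+ //	create := CreateCluster{
+ //		ClusterName:            types.StringValue("analytics"),
+ //		SparkVersion:           types.StringValue("3.3.x-scala2.11"),
+ //		NodeTypeId:             types.StringValue("i3.xlarge"),
+ //		NumWorkers:             types.Int64Value(2),
+ //		AutoterminationMinutes: types.Int64Value(60),
+ //		SparkConf: map[string]types.String{
+ //			"spark.speculation": types.StringValue("true"),
+ //		},
+ //	}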
+ AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *AwsAttributes `tfsdk:"aws_attributes"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *AzureAttributes `tfsdk:"azure_attributes"` + // When specified, this clones libraries from a source cluster during the + // creation of a new cluster. + CloneFrom *CloneCluster `tfsdk:"clone_from"` + // The configuration for delivering spark logs to a long-term storage + // destination. Two kinds of destinations (dbfs and s3) are supported. Only + // one destination can be specified for one cluster. If the conf is given, + // the logs will be delivered to the destination every `5 mins`. The + // destination of driver logs is `$destination/$clusterId/driver`, while the + // destination of executor logs is `$destination/$clusterId/executor`. + ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf"` + // Cluster name requested by the user. This doesn't have to be unique. If + // not specified at creation, the cluster name will be an empty string. + ClusterName types.String `tfsdk:"cluster_name"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // Data security mode decides what data governance model to use when + // accessing data from a cluster. + // + // * `NONE`: No security isolation for multiple users sharing the cluster. + // Data governance features are not available in this mode. * `SINGLE_USER`: + // A secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. + // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. * + // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy + // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This + // mode provides a way that doesn’t have UC nor passthrough enabled. + DataSecurityMode DataSecurityMode `tfsdk:"data_security_mode"` + + DockerImage *DockerImage `tfsdk:"docker_image"` + // The optional ID of the instance pool for the driver of the cluster + // belongs. The pool cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. 
+ DriverInstancePoolId types.String `tfsdk:"driver_instance_pool_id"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId types.String `tfsdk:"driver_node_type_id"` + // Autoscaling Local Storage: when enabled, this cluster will dynamically + // acquire additional disk space when its Spark workers are running low on + // disk space. This feature requires specific AWS permissions to function + // correctly - refer to the User Guide for more details. + EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk"` + // Whether to enable LUKS on cluster VMs' local disks + EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `tfsdk:"init_scripts"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId types.String `tfsdk:"instance_pool_id"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers types.Int64 `tfsdk:"num_workers"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId types.String `tfsdk:"policy_id"` + // Decides which runtime engine to be use, e.g. Standard vs. Photon. If + // unspecified, the runtime engine is inferred from spark_version. + RuntimeEngine RuntimeEngine `tfsdk:"runtime_engine"` + // Single user name if data_security_mode is `SINGLE_USER` + SingleUserName types.String `tfsdk:"single_user_name"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. Users can also pass in a string of extra + // JVM options to the driver and the executors via + // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` + // respectively. + SparkConf map[string]types.String `tfsdk:"spark_conf"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. 
+ // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]types.String `tfsdk:"spark_env_vars"` + // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of + // available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + SparkVersion types.String `tfsdk:"spark_version"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []types.String `tfsdk:"ssh_public_keys"` + + WorkloadType *WorkloadType `tfsdk:"workload_type"` +} + +type CreateClusterResponse struct { + ClusterId types.String `tfsdk:"cluster_id"` +} + +type CreateContext struct { + // Running cluster id + ClusterId types.String `tfsdk:"clusterId"` + + Language Language `tfsdk:"language"` +} + +type CreateInstancePool struct { + // Attributes related to instance pools running on Amazon Web Services. If + // not specified at pool creation, a set of default values will be used. + AwsAttributes *InstancePoolAwsAttributes `tfsdk:"aws_attributes"` + // Attributes related to instance pools running on Azure. If not specified + // at pool creation, a set of default values will be used. + AzureAttributes *InstancePoolAzureAttributes `tfsdk:"azure_attributes"` + // Additional tags for pool resources. Databricks will tag all pool + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // Defines the specification of the disks that will be attached to all spark + // containers. + DiskSpec *DiskSpec `tfsdk:"disk_spec"` + // Autoscaling Local Storage: when enabled, this instances in this pool will + // dynamically acquire additional disk space when its Spark workers are + // running low on disk space. In AWS, this feature requires specific AWS + // permissions to function correctly - refer to the User Guide for more + // details. + EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk"` + // Attributes related to instance pools running on Google Cloud Platform. If + // not specified at pool creation, a set of default values will be used. + GcpAttributes *InstancePoolGcpAttributes `tfsdk:"gcp_attributes"` + // Automatically terminates the extra instances in the pool cache after they + // are inactive for this time in minutes if min_idle_instances requirement + // is already met. If not set, the extra pool instances will be + // automatically terminated after a default timeout. If specified, the + // threshold must be between 0 and 10000 minutes. Users can also set this + // value to 0 to instantly remove idle instances from the cache if min cache + // size could still hold. + IdleInstanceAutoterminationMinutes types.Int64 `tfsdk:"idle_instance_autotermination_minutes"` + // Pool name requested by the user. Pool name must be unique. 
Length must be + // between 1 and 100 characters. + InstancePoolName types.String `tfsdk:"instance_pool_name"` + // Maximum number of outstanding instances to keep in the pool, including + // both instances used by clusters and idle instances. Clusters that require + // further instance provisioning will fail during upsize requests. + MaxCapacity types.Int64 `tfsdk:"max_capacity"` + // Minimum number of idle instances to keep in the instance pool + MinIdleInstances types.Int64 `tfsdk:"min_idle_instances"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` + // Custom Docker Image BYOC + PreloadedDockerImages []DockerImage `tfsdk:"preloaded_docker_images"` + // A list containing at most one preloaded Spark image version for the pool. + // Pool-backed clusters started with the preloaded Spark version will start + // faster. A list of available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + PreloadedSparkVersions []types.String `tfsdk:"preloaded_spark_versions"` +} + +type CreateInstancePoolResponse struct { + // The ID of the created instance pool. + InstancePoolId types.String `tfsdk:"instance_pool_id"` +} + +type CreatePolicy struct { + // Policy definition document expressed in [Databricks Cluster Policy + // Definition Language]. + // + // [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + Definition types.String `tfsdk:"definition"` + // Additional human-readable description of the cluster policy. + Description types.String `tfsdk:"description"` + // A list of libraries to be installed on the next cluster restart that uses + // this policy. The maximum number of libraries is 500. + Libraries []Library `tfsdk:"libraries"` + // Max number of clusters per user that can be active using this policy. If + // not present, there is no max limit. + MaxClustersPerUser types.Int64 `tfsdk:"max_clusters_per_user"` + // Cluster Policy name requested by the user. This has to be unique. Length + // must be between 1 and 100 characters. + Name types.String `tfsdk:"name"` + // Policy definition JSON document expressed in [Databricks Policy + // Definition Language]. The JSON document must be passed as a string and + // cannot be embedded in the requests. + // + // You can use this to customize the policy definition inherited from the + // policy family. Policy rules specified here are merged into the inherited + // policy definition. + // + // [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + PolicyFamilyDefinitionOverrides types.String `tfsdk:"policy_family_definition_overrides"` + // ID of the policy family. The cluster policy's policy definition inherits + // the policy family's policy definition. + // + // Cannot be used with `definition`. Use + // `policy_family_definition_overrides` instead to customize the policy + // definition. + PolicyFamilyId types.String `tfsdk:"policy_family_id"` +} + +type CreatePolicyResponse struct { + // Canonical unique identifier for the cluster policy. 
+ PolicyId types.String `tfsdk:"policy_id"` +} + +type CreateResponse struct { + // The global init script ID. + ScriptId types.String `tfsdk:"script_id"` +} + +type Created struct { + Id types.String `tfsdk:"id"` +} + +type DataPlaneEventDetails struct { + // + EventType DataPlaneEventDetailsEventType `tfsdk:"event_type"` + // + ExecutorFailures types.Int64 `tfsdk:"executor_failures"` + // + HostId types.String `tfsdk:"host_id"` + // + Timestamp types.Int64 `tfsdk:"timestamp"` +} + +// +type DataPlaneEventDetailsEventType string + +const DataPlaneEventDetailsEventTypeNodeBlacklisted DataPlaneEventDetailsEventType = `NODE_BLACKLISTED` + +const DataPlaneEventDetailsEventTypeNodeExcludedDecommissioned DataPlaneEventDetailsEventType = `NODE_EXCLUDED_DECOMMISSIONED` + +// String representation for [fmt.Print] +func (f *DataPlaneEventDetailsEventType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataPlaneEventDetailsEventType) Set(v string) error { + switch v { + case `NODE_BLACKLISTED`, `NODE_EXCLUDED_DECOMMISSIONED`: + *f = DataPlaneEventDetailsEventType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NODE_BLACKLISTED", "NODE_EXCLUDED_DECOMMISSIONED"`, v) + } +} + +// Type always returns DataPlaneEventDetailsEventType to satisfy [pflag.Value] interface +func (f *DataPlaneEventDetailsEventType) Type() string { + return "DataPlaneEventDetailsEventType" +} + +// Data security mode decides what data governance model to use when accessing +// data from a cluster. +// +// * `NONE`: No security isolation for multiple users sharing the cluster. Data +// governance features are not available in this mode. * `SINGLE_USER`: A secure +// cluster that can only be exclusively used by a single user specified in +// `single_user_name`. Most programming languages, cluster features and data +// governance features are available in this mode. * `USER_ISOLATION`: A secure +// cluster that can be shared by multiple users. Cluster users are fully +// isolated so that they cannot see each other's data and credentials. Most data +// governance features are supported in this mode. But programming languages and +// cluster features might be limited. +// +// The following modes are deprecated starting with Databricks Runtime 15.0 and +// will be removed for future Databricks Runtime versions: +// +// * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table ACL +// clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating from +// legacy Passthrough on high concurrency clusters. * `LEGACY_SINGLE_USER`: This +// mode is for users migrating from legacy Passthrough on standard clusters. * +// `LEGACY_SINGLE_USER_STANDARD`: This mode provides a way that doesn’t have +// UC nor passthrough enabled. +type DataSecurityMode string + +// This mode is for users migrating from legacy Passthrough on high concurrency +// clusters. +const DataSecurityModeLegacyPassthrough DataSecurityMode = `LEGACY_PASSTHROUGH` + +// This mode is for users migrating from legacy Passthrough on standard +// clusters. +const DataSecurityModeLegacySingleUser DataSecurityMode = `LEGACY_SINGLE_USER` + +// This mode provides a way that doesn’t have UC nor passthrough enabled. +const DataSecurityModeLegacySingleUserStandard DataSecurityMode = `LEGACY_SINGLE_USER_STANDARD` + +// This mode is for users migrating from legacy Table ACL clusters. 
+const DataSecurityModeLegacyTableAcl DataSecurityMode = `LEGACY_TABLE_ACL` + +// No security isolation for multiple users sharing the cluster. Data governance +// features are not available in this mode. +const DataSecurityModeNone DataSecurityMode = `NONE` + +// A secure cluster that can only be exclusively used by a single user specified +// in `single_user_name`. Most programming languages, cluster features and data +// governance features are available in this mode. +const DataSecurityModeSingleUser DataSecurityMode = `SINGLE_USER` + +// A secure cluster that can be shared by multiple users. Cluster users are +// fully isolated so that they cannot see each other's data and credentials. +// Most data governance features are supported in this mode. But programming +// languages and cluster features might be limited. +const DataSecurityModeUserIsolation DataSecurityMode = `USER_ISOLATION` + +// String representation for [fmt.Print] +func (f *DataSecurityMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataSecurityMode) Set(v string) error { + switch v { + case `LEGACY_PASSTHROUGH`, `LEGACY_SINGLE_USER`, `LEGACY_SINGLE_USER_STANDARD`, `LEGACY_TABLE_ACL`, `NONE`, `SINGLE_USER`, `USER_ISOLATION`: + *f = DataSecurityMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "LEGACY_PASSTHROUGH", "LEGACY_SINGLE_USER", "LEGACY_SINGLE_USER_STANDARD", "LEGACY_TABLE_ACL", "NONE", "SINGLE_USER", "USER_ISOLATION"`, v) + } +} + +// Type always returns DataSecurityMode to satisfy [pflag.Value] interface +func (f *DataSecurityMode) Type() string { + return "DataSecurityMode" +} + +type DbfsStorageInfo struct { + // dbfs destination, e.g. `dbfs:/my/path` + Destination types.String `tfsdk:"destination"` +} + +type DeleteCluster struct { + // The cluster to be terminated. + ClusterId types.String `tfsdk:"cluster_id"` +} + +type DeleteClusterResponse struct { +} + +// Delete init script +type DeleteGlobalInitScriptRequest struct { + // The ID of the global init script. + ScriptId types.String `tfsdk:"-" url:"-"` +} + +type DeleteInstancePool struct { + // The instance pool to be terminated. + InstancePoolId types.String `tfsdk:"instance_pool_id"` +} + +type DeleteInstancePoolResponse struct { +} + +type DeletePolicy struct { + // The ID of the policy to delete. + PolicyId types.String `tfsdk:"policy_id"` +} + +type DeletePolicyResponse struct { +} + +type DeleteResponse struct { +} + +type DestroyContext struct { + ClusterId types.String `tfsdk:"clusterId"` + + ContextId types.String `tfsdk:"contextId"` +} + +type DestroyResponse struct { +} + +type DiskSpec struct { + // The number of disks launched for each instance: - This feature is only + // enabled for supported node types. - Users can choose up to the limit of + // the disks supported by the node type. - For node types with no OS disk, + // at least one disk must be specified; otherwise, cluster creation will + // fail. + // + // If disks are attached, Databricks will configure Spark to use only the + // disks for scratch storage, because heterogenously sized scratch devices + // can lead to inefficient disk utilization. If no disks are attached, + // Databricks will configure Spark to use instance store disks. + // + // Note: If disks are specified, then the Spark configuration + // `spark.local.dir` will be overridden. + // + // Disks will be mounted at: - For AWS: `/ebs0`, `/ebs1`, and etc. - For + // Azure: `/remote_volume0`, `/remote_volume1`, and etc. 
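+ //
+ // Illustrative sketch (not part of the generated output): a DiskSpec
+ // attaching one 100 GiB general purpose SSD per instance, using the DiskType
+ // constants defined below:
+ //
+ //	spec := DiskSpec{
+ //		DiskCount: types.Int64Value(1),
+ //		DiskSize:  types.Int64Value(100),
+ //		DiskType: &DiskType{
+ //			EbsVolumeType: DiskTypeEbsVolumeTypeGeneralPurposeSsd,
+ //		},
+ //	}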
+ DiskCount types.Int64 `tfsdk:"disk_count"` + + DiskIops types.Int64 `tfsdk:"disk_iops"` + // The size of each disk (in GiB) launched for each instance. Values must + // fall into the supported range for a particular instance type. + // + // For AWS: - General Purpose SSD: 100 - 4096 GiB - Throughput Optimized + // HDD: 500 - 4096 GiB + // + // For Azure: - Premium LRS (SSD): 1 - 1023 GiB - Standard LRS (HDD): 1- + // 1023 GiB + DiskSize types.Int64 `tfsdk:"disk_size"` + + DiskThroughput types.Int64 `tfsdk:"disk_throughput"` + // The type of disks that will be launched with this cluster. + DiskType *DiskType `tfsdk:"disk_type"` +} + +type DiskType struct { + AzureDiskVolumeType DiskTypeAzureDiskVolumeType `tfsdk:"azure_disk_volume_type"` + + EbsVolumeType DiskTypeEbsVolumeType `tfsdk:"ebs_volume_type"` +} + +type DiskTypeAzureDiskVolumeType string + +const DiskTypeAzureDiskVolumeTypePremiumLrs DiskTypeAzureDiskVolumeType = `PREMIUM_LRS` + +const DiskTypeAzureDiskVolumeTypeStandardLrs DiskTypeAzureDiskVolumeType = `STANDARD_LRS` + +// String representation for [fmt.Print] +func (f *DiskTypeAzureDiskVolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DiskTypeAzureDiskVolumeType) Set(v string) error { + switch v { + case `PREMIUM_LRS`, `STANDARD_LRS`: + *f = DiskTypeAzureDiskVolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PREMIUM_LRS", "STANDARD_LRS"`, v) + } +} + +// Type always returns DiskTypeAzureDiskVolumeType to satisfy [pflag.Value] interface +func (f *DiskTypeAzureDiskVolumeType) Type() string { + return "DiskTypeAzureDiskVolumeType" +} + +type DiskTypeEbsVolumeType string + +const DiskTypeEbsVolumeTypeGeneralPurposeSsd DiskTypeEbsVolumeType = `GENERAL_PURPOSE_SSD` + +const DiskTypeEbsVolumeTypeThroughputOptimizedHdd DiskTypeEbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD` + +// String representation for [fmt.Print] +func (f *DiskTypeEbsVolumeType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DiskTypeEbsVolumeType) Set(v string) error { + switch v { + case `GENERAL_PURPOSE_SSD`, `THROUGHPUT_OPTIMIZED_HDD`: + *f = DiskTypeEbsVolumeType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"`, v) + } +} + +// Type always returns DiskTypeEbsVolumeType to satisfy [pflag.Value] interface +func (f *DiskTypeEbsVolumeType) Type() string { + return "DiskTypeEbsVolumeType" +} + +type DockerBasicAuth struct { + // Password of the user + Password types.String `tfsdk:"password"` + // Name of the user + Username types.String `tfsdk:"username"` +} + +type DockerImage struct { + BasicAuth *DockerBasicAuth `tfsdk:"basic_auth"` + // URL of the docker image. + Url types.String `tfsdk:"url"` +} + +// The type of EBS volumes that will be launched with this cluster. 
+type EbsVolumeType string
+
+const EbsVolumeTypeGeneralPurposeSsd EbsVolumeType = `GENERAL_PURPOSE_SSD`
+
+const EbsVolumeTypeThroughputOptimizedHdd EbsVolumeType = `THROUGHPUT_OPTIMIZED_HDD`
+
+// String representation for [fmt.Print]
+func (f *EbsVolumeType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *EbsVolumeType) Set(v string) error {
+ switch v {
+ case `GENERAL_PURPOSE_SSD`, `THROUGHPUT_OPTIMIZED_HDD`:
+ *f = EbsVolumeType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "GENERAL_PURPOSE_SSD", "THROUGHPUT_OPTIMIZED_HDD"`, v)
+ }
+}
+
+// Type always returns EbsVolumeType to satisfy [pflag.Value] interface
+func (f *EbsVolumeType) Type() string {
+ return "EbsVolumeType"
+}
+
+type EditCluster struct {
+ // When set to true, fixed and default values from the policy will be used
+ // for fields that are omitted. When set to false, only fixed values from
+ // the policy will be applied.
+ ApplyPolicyDefaultValues types.Bool `tfsdk:"apply_policy_default_values"`
+ // Parameters needed in order to automatically scale clusters up and down
+ // based on load. Note: autoscaling works best with DB runtime versions 3.0
+ // or later.
+ Autoscale *AutoScale `tfsdk:"autoscale"`
+ // Automatically terminates the cluster after it is inactive for this time
+ // in minutes. If not set, this cluster will not be automatically
+ // terminated. If specified, the threshold must be between 10 and 10000
+ // minutes. Users can also set this value to 0 to explicitly disable
+ // automatic termination.
+ AutoterminationMinutes types.Int64 `tfsdk:"autotermination_minutes"`
+ // Attributes related to clusters running on Amazon Web Services. If not
+ // specified at cluster creation, a set of default values will be used.
+ AwsAttributes *AwsAttributes `tfsdk:"aws_attributes"`
+ // Attributes related to clusters running on Microsoft Azure. If not
+ // specified at cluster creation, a set of default values will be used.
+ AzureAttributes *AzureAttributes `tfsdk:"azure_attributes"`
+ // ID of the cluster
+ ClusterId types.String `tfsdk:"cluster_id"`
+ // The configuration for delivering spark logs to a long-term storage
+ // destination. Two kinds of destinations (dbfs and s3) are supported. Only
+ // one destination can be specified for one cluster. If the conf is given,
+ // the logs will be delivered to the destination every `5 mins`. The
+ // destination of driver logs is `$destination/$clusterId/driver`, while the
+ // destination of executor logs is `$destination/$clusterId/executor`.
+ ClusterLogConf *ClusterLogConf `tfsdk:"cluster_log_conf"`
+ // Cluster name requested by the user. This doesn't have to be unique. If
+ // not specified at creation, the cluster name will be an empty string.
+ ClusterName types.String `tfsdk:"cluster_name"`
+ // Additional tags for cluster resources. Databricks will tag all cluster
+ // resources (e.g., AWS instances and EBS volumes) with these tags in
+ // addition to `default_tags`. Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ //
+ // - Clusters can only reuse cloud resources if the resources' tags are a
+ // subset of the cluster tags
+ CustomTags map[string]types.String `tfsdk:"custom_tags"`
+ // Data security mode decides what data governance model to use when
+ // accessing data from a cluster.
+ //
+ // * `NONE`: No security isolation for multiple users sharing the cluster.
+ // Data governance features are not available in this mode.
* `SINGLE_USER`: + // A secure cluster that can only be exclusively used by a single user + // specified in `single_user_name`. Most programming languages, cluster + // features and data governance features are available in this mode. * + // `USER_ISOLATION`: A secure cluster that can be shared by multiple users. + // Cluster users are fully isolated so that they cannot see each other's + // data and credentials. Most data governance features are supported in this + // mode. But programming languages and cluster features might be limited. + // + // The following modes are deprecated starting with Databricks Runtime 15.0 + // and will be removed for future Databricks Runtime versions: + // + // * `LEGACY_TABLE_ACL`: This mode is for users migrating from legacy Table + // ACL clusters. * `LEGACY_PASSTHROUGH`: This mode is for users migrating + // from legacy Passthrough on high concurrency clusters. * + // `LEGACY_SINGLE_USER`: This mode is for users migrating from legacy + // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This + // mode provides a way that doesn’t have UC nor passthrough enabled. + DataSecurityMode DataSecurityMode `tfsdk:"data_security_mode"` + + DockerImage *DockerImage `tfsdk:"docker_image"` + // The optional ID of the instance pool for the driver of the cluster + // belongs. The pool cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId types.String `tfsdk:"driver_instance_pool_id"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId types.String `tfsdk:"driver_node_type_id"` + // Autoscaling Local Storage: when enabled, this cluster will dynamically + // acquire additional disk space when its Spark workers are running low on + // disk space. This feature requires specific AWS permissions to function + // correctly - refer to the User Guide for more details. + EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk"` + // Whether to enable LUKS on cluster VMs' local disks + EnableLocalDiskEncryption types.Bool `tfsdk:"enable_local_disk_encryption"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *GcpAttributes `tfsdk:"gcp_attributes"` + // The configuration for storing init scripts. Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []InitScriptInfo `tfsdk:"init_scripts"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId types.String `tfsdk:"instance_pool_id"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. 
+ //
+ // Note: When reading the properties of a cluster, this field reflects the
+ // desired number of workers rather than the actual current number of
+ // workers. For instance, if a cluster is resized from 5 to 10 workers, this
+ // field will immediately be updated to reflect the target size of 10
+ // workers, whereas the workers listed in `spark_info` will gradually
+ // increase from 5 to 10 as the new nodes are provisioned.
+ NumWorkers types.Int64 `tfsdk:"num_workers"`
+ // The ID of the cluster policy used to create the cluster if applicable.
+ PolicyId types.String `tfsdk:"policy_id"`
+ // Decides which runtime engine to use, e.g. Standard vs. Photon. If
+ // unspecified, the runtime engine is inferred from spark_version.
+ RuntimeEngine RuntimeEngine `tfsdk:"runtime_engine"`
+ // Single user name if data_security_mode is `SINGLE_USER`
+ SingleUserName types.String `tfsdk:"single_user_name"`
+ // An object containing a set of optional, user-specified Spark
+ // configuration key-value pairs. Users can also pass in a string of extra
+ // JVM options to the driver and the executors via
+ // `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`
+ // respectively.
+ SparkConf map[string]types.String `tfsdk:"spark_conf"`
+ // An object containing a set of optional, user-specified environment
+ // variable key-value pairs. Please note that key-value pair of the form
+ // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the
+ // driver and workers.
+ //
+ // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we
+ // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the
+ // example below. This ensures that all default databricks managed
+ // environmental variables are included as well.
+ //
+ // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m",
+ // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS":
+ // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}`
+ SparkEnvVars map[string]types.String `tfsdk:"spark_env_vars"`
+ // The Spark version of the cluster, e.g. `3.3.x-scala2.11`. A list of
+ // available Spark versions can be retrieved by using the
+ // :method:clusters/sparkVersions API call.
+ SparkVersion types.String `tfsdk:"spark_version"`
+ // SSH public key contents that will be added to each Spark node in this
+ // cluster. The corresponding private keys can be used to login with the
+ // user name `ubuntu` on port `2200`. Up to 10 keys can be specified.
+ SshPublicKeys []types.String `tfsdk:"ssh_public_keys"`
+
+ WorkloadType *WorkloadType `tfsdk:"workload_type"`
+}
+
+type EditClusterResponse struct {
+}
+
+type EditInstancePool struct {
+ // Additional tags for pool resources. Databricks will tag all pool
+ // resources (e.g., AWS instances and EBS volumes) with these tags in
+ // addition to `default_tags`. Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ CustomTags map[string]types.String `tfsdk:"custom_tags"`
+ // Automatically terminates the extra instances in the pool cache after they
+ // are inactive for this time in minutes if min_idle_instances requirement
+ // is already met. If not set, the extra pool instances will be
+ // automatically terminated after a default timeout. If specified, the
+ // threshold must be between 0 and 10000 minutes. Users can also set this
+ // value to 0 to instantly remove idle instances from the cache if min cache
+ // size could still hold.
+ IdleInstanceAutoterminationMinutes types.Int64 `tfsdk:"idle_instance_autotermination_minutes"` + // Instance pool ID + InstancePoolId types.String `tfsdk:"instance_pool_id"` + // Pool name requested by the user. Pool name must be unique. Length must be + // between 1 and 100 characters. + InstancePoolName types.String `tfsdk:"instance_pool_name"` + // Maximum number of outstanding instances to keep in the pool, including + // both instances used by clusters and idle instances. Clusters that require + // further instance provisioning will fail during upsize requests. + MaxCapacity types.Int64 `tfsdk:"max_capacity"` + // Minimum number of idle instances to keep in the instance pool + MinIdleInstances types.Int64 `tfsdk:"min_idle_instances"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` +} + +type EditInstancePoolResponse struct { +} + +type EditPolicy struct { + // Policy definition document expressed in [Databricks Cluster Policy + // Definition Language]. + // + // [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + Definition types.String `tfsdk:"definition"` + // Additional human-readable description of the cluster policy. + Description types.String `tfsdk:"description"` + // A list of libraries to be installed on the next cluster restart that uses + // this policy. The maximum number of libraries is 500. + Libraries []Library `tfsdk:"libraries"` + // Max number of clusters per user that can be active using this policy. If + // not present, there is no max limit. + MaxClustersPerUser types.Int64 `tfsdk:"max_clusters_per_user"` + // Cluster Policy name requested by the user. This has to be unique. Length + // must be between 1 and 100 characters. + Name types.String `tfsdk:"name"` + // Policy definition JSON document expressed in [Databricks Policy + // Definition Language]. The JSON document must be passed as a string and + // cannot be embedded in the requests. + // + // You can use this to customize the policy definition inherited from the + // policy family. Policy rules specified here are merged into the inherited + // policy definition. + // + // [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + PolicyFamilyDefinitionOverrides types.String `tfsdk:"policy_family_definition_overrides"` + // ID of the policy family. The cluster policy's policy definition inherits + // the policy family's policy definition. + // + // Cannot be used with `definition`. Use + // `policy_family_definition_overrides` instead to customize the policy + // definition. + PolicyFamilyId types.String `tfsdk:"policy_family_id"` + // The ID of the policy to update. + PolicyId types.String `tfsdk:"policy_id"` +} + +type EditPolicyResponse struct { +} + +type EditResponse struct { +} + +// The environment entity used to preserve serverless environment side panel and +// jobs' environment for non-notebook task. In this minimal environment spec, +// only pip dependencies are supported. +type Environment struct { + // Client version used by the environment The client is the user-facing + // environment of the runtime. 
Each client comes with a specific set of + // pre-installed libraries. The version is a string, consisting of the major + // client version. + Client types.String `tfsdk:"client"` + // List of pip dependencies, as supported by the version of pip in this + // environment. Each dependency is a pip requirement file line + // https://pip.pypa.io/en/stable/reference/requirements-file-format/ Allowed + // dependency could be , , (WSFS or Volumes in Databricks), E.g. + // dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] + Dependencies []types.String `tfsdk:"dependencies"` +} + +type EventDetails struct { + // * For created clusters, the attributes of the cluster. * For edited + // clusters, the new attributes of the cluster. + Attributes *ClusterAttributes `tfsdk:"attributes"` + // The cause of a change in target size. + Cause EventDetailsCause `tfsdk:"cause"` + // The actual cluster size that was set in the cluster creation or edit. + ClusterSize *ClusterSize `tfsdk:"cluster_size"` + // The current number of vCPUs in the cluster. + CurrentNumVcpus types.Int64 `tfsdk:"current_num_vcpus"` + // The current number of nodes in the cluster. + CurrentNumWorkers types.Int64 `tfsdk:"current_num_workers"` + // + DidNotExpandReason types.String `tfsdk:"did_not_expand_reason"` + // Current disk size in bytes + DiskSize types.Int64 `tfsdk:"disk_size"` + // More details about the change in driver's state + DriverStateMessage types.String `tfsdk:"driver_state_message"` + // Whether or not a blocklisted node should be terminated. For + // ClusterEventType NODE_BLACKLISTED. + EnableTerminationForNodeBlocklisted types.Bool `tfsdk:"enable_termination_for_node_blocklisted"` + // + FreeSpace types.Int64 `tfsdk:"free_space"` + // List of global and cluster init scripts associated with this cluster + // event. + InitScripts *InitScriptEventDetails `tfsdk:"init_scripts"` + // Instance Id where the event originated from + InstanceId types.String `tfsdk:"instance_id"` + // Unique identifier of the specific job run associated with this cluster + // event * For clusters created for jobs, this will be the same as the + // cluster name + JobRunName types.String `tfsdk:"job_run_name"` + // The cluster attributes before a cluster was edited. + PreviousAttributes *ClusterAttributes `tfsdk:"previous_attributes"` + // The size of the cluster before an edit or resize. + PreviousClusterSize *ClusterSize `tfsdk:"previous_cluster_size"` + // Previous disk size in bytes + PreviousDiskSize types.Int64 `tfsdk:"previous_disk_size"` + // A termination reason: * On a TERMINATED event, this is the reason of the + // termination. * On a RESIZE_COMPLETE event, this indicates the reason that + // we failed to acquire some nodes. + Reason *TerminationReason `tfsdk:"reason"` + // The targeted number of vCPUs in the cluster. + TargetNumVcpus types.Int64 `tfsdk:"target_num_vcpus"` + // The targeted number of nodes in the cluster. + TargetNumWorkers types.Int64 `tfsdk:"target_num_workers"` + // The user that caused the event to occur. (Empty if it was done by the + // control plane.) + User types.String `tfsdk:"user"` +} + +// The cause of a change in target size. 
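+//
+// For illustration only (names below are hypothetical locals, not part of the
+// API), a raw string can be checked against the allowed values with the
+// generated Set method before it is stored, e.g.
+//
+//	var cause EventDetailsCause
+//	if err := cause.Set(`AUTOSCALE`); err != nil {
+//		// the value was not one of the allowed constants
+//	}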
+type EventDetailsCause string + +const EventDetailsCauseAutorecovery EventDetailsCause = `AUTORECOVERY` + +const EventDetailsCauseAutoscale EventDetailsCause = `AUTOSCALE` + +const EventDetailsCauseReplaceBadNodes EventDetailsCause = `REPLACE_BAD_NODES` + +const EventDetailsCauseUserRequest EventDetailsCause = `USER_REQUEST` + +// String representation for [fmt.Print] +func (f *EventDetailsCause) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EventDetailsCause) Set(v string) error { + switch v { + case `AUTORECOVERY`, `AUTOSCALE`, `REPLACE_BAD_NODES`, `USER_REQUEST`: + *f = EventDetailsCause(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AUTORECOVERY", "AUTOSCALE", "REPLACE_BAD_NODES", "USER_REQUEST"`, v) + } +} + +// Type always returns EventDetailsCause to satisfy [pflag.Value] interface +func (f *EventDetailsCause) Type() string { + return "EventDetailsCause" +} + +type EventType string + +const EventTypeAutoscalingStatsReport EventType = `AUTOSCALING_STATS_REPORT` + +const EventTypeCreating EventType = `CREATING` + +const EventTypeDbfsDown EventType = `DBFS_DOWN` + +const EventTypeDidNotExpandDisk EventType = `DID_NOT_EXPAND_DISK` + +const EventTypeDriverHealthy EventType = `DRIVER_HEALTHY` + +const EventTypeDriverNotResponding EventType = `DRIVER_NOT_RESPONDING` + +const EventTypeDriverUnavailable EventType = `DRIVER_UNAVAILABLE` + +const EventTypeEdited EventType = `EDITED` + +const EventTypeExpandedDisk EventType = `EXPANDED_DISK` + +const EventTypeFailedToExpandDisk EventType = `FAILED_TO_EXPAND_DISK` + +const EventTypeInitScriptsFinished EventType = `INIT_SCRIPTS_FINISHED` + +const EventTypeInitScriptsStarted EventType = `INIT_SCRIPTS_STARTED` + +const EventTypeMetastoreDown EventType = `METASTORE_DOWN` + +const EventTypeNodesLost EventType = `NODES_LOST` + +const EventTypeNodeBlacklisted EventType = `NODE_BLACKLISTED` + +const EventTypeNodeExcludedDecommissioned EventType = `NODE_EXCLUDED_DECOMMISSIONED` + +const EventTypePinned EventType = `PINNED` + +const EventTypeResizing EventType = `RESIZING` + +const EventTypeRestarting EventType = `RESTARTING` + +const EventTypeRunning EventType = `RUNNING` + +const EventTypeSparkException EventType = `SPARK_EXCEPTION` + +const EventTypeStarting EventType = `STARTING` + +const EventTypeTerminating EventType = `TERMINATING` + +const EventTypeUnpinned EventType = `UNPINNED` + +const EventTypeUpsizeCompleted EventType = `UPSIZE_COMPLETED` + +// String representation for [fmt.Print] +func (f *EventType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EventType) Set(v string) error { + switch v { + case `AUTOSCALING_STATS_REPORT`, `CREATING`, `DBFS_DOWN`, `DID_NOT_EXPAND_DISK`, `DRIVER_HEALTHY`, `DRIVER_NOT_RESPONDING`, `DRIVER_UNAVAILABLE`, `EDITED`, `EXPANDED_DISK`, `FAILED_TO_EXPAND_DISK`, `INIT_SCRIPTS_FINISHED`, `INIT_SCRIPTS_STARTED`, `METASTORE_DOWN`, `NODES_LOST`, `NODE_BLACKLISTED`, `NODE_EXCLUDED_DECOMMISSIONED`, `PINNED`, `RESIZING`, `RESTARTING`, `RUNNING`, `SPARK_EXCEPTION`, `STARTING`, `TERMINATING`, `UNPINNED`, `UPSIZE_COMPLETED`: + *f = EventType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AUTOSCALING_STATS_REPORT", "CREATING", "DBFS_DOWN", "DID_NOT_EXPAND_DISK", "DRIVER_HEALTHY", "DRIVER_NOT_RESPONDING", "DRIVER_UNAVAILABLE", "EDITED", "EXPANDED_DISK", "FAILED_TO_EXPAND_DISK", "INIT_SCRIPTS_FINISHED", "INIT_SCRIPTS_STARTED", 
"METASTORE_DOWN", "NODES_LOST", "NODE_BLACKLISTED", "NODE_EXCLUDED_DECOMMISSIONED", "PINNED", "RESIZING", "RESTARTING", "RUNNING", "SPARK_EXCEPTION", "STARTING", "TERMINATING", "UNPINNED", "UPSIZE_COMPLETED"`, v) + } +} + +// Type always returns EventType to satisfy [pflag.Value] interface +func (f *EventType) Type() string { + return "EventType" +} + +type GcpAttributes struct { + // This field determines whether the instance pool will contain preemptible + // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs + // if the former is unavailable. + Availability GcpAvailability `tfsdk:"availability"` + // boot disk size in GB + BootDiskSize types.Int64 `tfsdk:"boot_disk_size"` + // If provided, the cluster will impersonate the google service account when + // accessing gcloud services (like GCS). The google service account must + // have previously been added to the Databricks environment by an account + // administrator. + GoogleServiceAccount types.String `tfsdk:"google_service_account"` + // If provided, each node (workers and driver) in the cluster will have this + // number of local SSDs attached. Each local SSD is 375GB in size. Refer to + // [GCP documentation] for the supported number of local SSDs for each + // instance type. + // + // [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds + LocalSsdCount types.Int64 `tfsdk:"local_ssd_count"` + // This field determines whether the spark executors will be scheduled to + // run on preemptible VMs (when set to true) versus standard compute engine + // VMs (when set to false; default). Note: Soon to be deprecated, use the + // availability field instead. + UsePreemptibleExecutors types.Bool `tfsdk:"use_preemptible_executors"` + // Identifier for the availability zone in which the cluster resides. This + // can be one of the following: - "HA" => High availability, spread nodes + // across availability zones for a Databricks deployment region [default] - + // "AUTO" => Databricks picks an availability zone to schedule the cluster + // on. - A GCP availability zone => Pick One of the available zones for + // (machine type + region) from + // https://cloud.google.com/compute/docs/regions-zones. + ZoneId types.String `tfsdk:"zone_id"` +} + +// This field determines whether the instance pool will contain preemptible VMs, +// on-demand VMs, or preemptible VMs with a fallback to on-demand VMs if the +// former is unavailable. +type GcpAvailability string + +const GcpAvailabilityOnDemandGcp GcpAvailability = `ON_DEMAND_GCP` + +const GcpAvailabilityPreemptibleGcp GcpAvailability = `PREEMPTIBLE_GCP` + +const GcpAvailabilityPreemptibleWithFallbackGcp GcpAvailability = `PREEMPTIBLE_WITH_FALLBACK_GCP` + +// String representation for [fmt.Print] +func (f *GcpAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GcpAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_GCP`, `PREEMPTIBLE_GCP`, `PREEMPTIBLE_WITH_FALLBACK_GCP`: + *f = GcpAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_GCP", "PREEMPTIBLE_GCP", "PREEMPTIBLE_WITH_FALLBACK_GCP"`, v) + } +} + +// Type always returns GcpAvailability to satisfy [pflag.Value] interface +func (f *GcpAvailability) Type() string { + return "GcpAvailability" +} + +type GcsStorageInfo struct { + // GCS destination/URI, e.g. 
`gs://my-bucket/some-prefix` + Destination types.String `tfsdk:"destination"` +} + +// Get cluster permission levels +type GetClusterPermissionLevelsRequest struct { + // The cluster for which to get or manage permissions. + ClusterId types.String `tfsdk:"-" url:"-"` +} + +type GetClusterPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []ClusterPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get cluster permissions +type GetClusterPermissionsRequest struct { + // The cluster for which to get or manage permissions. + ClusterId types.String `tfsdk:"-" url:"-"` +} + +// Get cluster policy permission levels +type GetClusterPolicyPermissionLevelsRequest struct { + // The cluster policy for which to get or manage permissions. + ClusterPolicyId types.String `tfsdk:"-" url:"-"` +} + +type GetClusterPolicyPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []ClusterPolicyPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get cluster policy permissions +type GetClusterPolicyPermissionsRequest struct { + // The cluster policy for which to get or manage permissions. + ClusterPolicyId types.String `tfsdk:"-" url:"-"` +} + +// Get a cluster policy +type GetClusterPolicyRequest struct { + // Canonical unique identifier for the cluster policy. + PolicyId types.String `tfsdk:"-" url:"policy_id"` +} + +// Get cluster info +type GetClusterRequest struct { + // The cluster about which to retrieve information. + ClusterId types.String `tfsdk:"-" url:"cluster_id"` +} + +type GetEvents struct { + // The ID of the cluster to retrieve events about. + ClusterId types.String `tfsdk:"cluster_id"` + // The end time in epoch milliseconds. If empty, returns events up to the + // current time. + EndTime types.Int64 `tfsdk:"end_time"` + // An optional set of event types to filter on. If empty, all event types + // are returned. + EventTypes []EventType `tfsdk:"event_types"` + // The maximum number of events to include in a page of events. Defaults to + // 50, and maximum allowed value is 500. + Limit types.Int64 `tfsdk:"limit"` + // The offset in the result set. Defaults to 0 (no offset). When an offset + // is specified and the results are requested in descending order, the + // end_time field is required. + Offset types.Int64 `tfsdk:"offset"` + // The order to list events in; either "ASC" or "DESC". Defaults to "DESC". + Order GetEventsOrder `tfsdk:"order"` + // The start time in epoch milliseconds. If empty, returns events starting + // from the beginning of time. + StartTime types.Int64 `tfsdk:"start_time"` +} + +// The order to list events in; either "ASC" or "DESC". Defaults to "DESC". +type GetEventsOrder string + +const GetEventsOrderAsc GetEventsOrder = `ASC` + +const GetEventsOrderDesc GetEventsOrder = `DESC` + +// String representation for [fmt.Print] +func (f *GetEventsOrder) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetEventsOrder) Set(v string) error { + switch v { + case `ASC`, `DESC`: + *f = GetEventsOrder(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ASC", "DESC"`, v) + } +} + +// Type always returns GetEventsOrder to satisfy [pflag.Value] interface +func (f *GetEventsOrder) Type() string { + return "GetEventsOrder" +} + +type GetEventsResponse struct { + // + Events []ClusterEvent `tfsdk:"events"` + // The parameters required to retrieve the next page of events. Omitted if + // there are no more events to read. 
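+ //
+ // For illustration only (resp and req are hypothetical GetEventsResponse and
+ // GetEvents values held by the caller), pagination is driven by reusing this
+ // field directly, e.g.
+ //
+ //	if resp.NextPage != nil {
+ //		req = *resp.NextPage // re-issue the events request with these parameters
+ //	}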
+ NextPage *GetEvents `tfsdk:"next_page"`
+ // The total number of events filtered by the start_time, end_time, and
+ // event_types.
+ TotalCount types.Int64 `tfsdk:"total_count"`
+}
+
+// Get an init script
+type GetGlobalInitScriptRequest struct {
+ // The ID of the global init script.
+ ScriptId types.String `tfsdk:"-" url:"-"`
+}
+
+type GetInstancePool struct {
+ // Attributes related to instance pools running on Amazon Web Services. If
+ // not specified at pool creation, a set of default values will be used.
+ AwsAttributes *InstancePoolAwsAttributes `tfsdk:"aws_attributes"`
+ // Attributes related to instance pools running on Azure. If not specified
+ // at pool creation, a set of default values will be used.
+ AzureAttributes *InstancePoolAzureAttributes `tfsdk:"azure_attributes"`
+ // Additional tags for pool resources. Databricks will tag all pool
+ // resources (e.g., AWS instances and EBS volumes) with these tags in
+ // addition to `default_tags`. Notes:
+ //
+ // - Currently, Databricks allows at most 45 custom tags
+ CustomTags map[string]types.String `tfsdk:"custom_tags"`
+ // Tags that are added by Databricks regardless of any `custom_tags`,
+ // including:
+ //
+ // - Vendor: Databricks
+ //
+ // - InstancePoolCreator:
+ //
+ // - InstancePoolName:
+ //
+ // - InstancePoolId:
+ DefaultTags map[string]types.String `tfsdk:"default_tags"`
+ // Defines the specification of the disks that will be attached to all spark
+ // containers.
+ DiskSpec *DiskSpec `tfsdk:"disk_spec"`
+ // Autoscaling Local Storage: when enabled, the instances in this pool will
+ // dynamically acquire additional disk space when their Spark workers are
+ // running low on disk space. In AWS, this feature requires specific AWS
+ // permissions to function correctly - refer to the User Guide for more
+ // details.
+ EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk"`
+ // Attributes related to instance pools running on Google Cloud Platform. If
+ // not specified at pool creation, a set of default values will be used.
+ GcpAttributes *InstancePoolGcpAttributes `tfsdk:"gcp_attributes"`
+ // Automatically terminates the extra instances in the pool cache after they
+ // are inactive for this time in minutes if min_idle_instances requirement
+ // is already met. If not set, the extra pool instances will be
+ // automatically terminated after a default timeout. If specified, the
+ // threshold must be between 0 and 10000 minutes. Users can also set this
+ // value to 0 to instantly remove idle instances from the cache if min cache
+ // size could still hold.
+ IdleInstanceAutoterminationMinutes types.Int64 `tfsdk:"idle_instance_autotermination_minutes"`
+ // Canonical unique identifier for the pool.
+ InstancePoolId types.String `tfsdk:"instance_pool_id"`
+ // Pool name requested by the user. Pool name must be unique. Length must be
+ // between 1 and 100 characters.
+ InstancePoolName types.String `tfsdk:"instance_pool_name"`
+ // Maximum number of outstanding instances to keep in the pool, including
+ // both instances used by clusters and idle instances. Clusters that require
+ // further instance provisioning will fail during upsize requests.
+ MaxCapacity types.Int64 `tfsdk:"max_capacity"`
+ // Minimum number of idle instances to keep in the instance pool
+ MinIdleInstances types.Int64 `tfsdk:"min_idle_instances"`
+ // This field encodes, through a single value, the resources available to
+ // each of the Spark nodes in this cluster.
For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` + // Custom Docker Image BYOC + PreloadedDockerImages []DockerImage `tfsdk:"preloaded_docker_images"` + // A list containing at most one preloaded Spark image version for the pool. + // Pool-backed clusters started with the preloaded Spark version will start + // faster. A list of available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + PreloadedSparkVersions []types.String `tfsdk:"preloaded_spark_versions"` + // Current state of the instance pool. + State InstancePoolState `tfsdk:"state"` + // Usage statistics about the instance pool. + Stats *InstancePoolStats `tfsdk:"stats"` + // Status of failed pending instances in the pool. + Status *InstancePoolStatus `tfsdk:"status"` +} + +// Get instance pool permission levels +type GetInstancePoolPermissionLevelsRequest struct { + // The instance pool for which to get or manage permissions. + InstancePoolId types.String `tfsdk:"-" url:"-"` +} + +type GetInstancePoolPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []InstancePoolPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get instance pool permissions +type GetInstancePoolPermissionsRequest struct { + // The instance pool for which to get or manage permissions. + InstancePoolId types.String `tfsdk:"-" url:"-"` +} + +// Get instance pool information +type GetInstancePoolRequest struct { + // The canonical unique identifier for the instance pool. + InstancePoolId types.String `tfsdk:"-" url:"instance_pool_id"` +} + +// Get policy family information +type GetPolicyFamilyRequest struct { + PolicyFamilyId types.String `tfsdk:"-" url:"-"` +} + +type GetSparkVersionsResponse struct { + // All the available Spark versions. + Versions []SparkVersion `tfsdk:"versions"` +} + +type GlobalInitScriptCreateRequest struct { + // Specifies whether the script is enabled. The script runs only if enabled. + Enabled types.Bool `tfsdk:"enabled"` + // The name of the script + Name types.String `tfsdk:"name"` + // The position of a global init script, where 0 represents the first script + // to run, 1 is the second script to run, in ascending order. + // + // If you omit the numeric position for a new global init script, it + // defaults to last position. It will run after all current scripts. Setting + // any value greater than the position of the last script is equivalent to + // the last position. Example: Take three existing scripts with positions 0, + // 1, and 2. Any position of (3) or greater puts the script in the last + // position. If an explicit position value conflicts with an existing script + // value, your request succeeds, but the original script at that position + // and all later scripts have their positions incremented by 1. + Position types.Int64 `tfsdk:"position"` + // The Base64-encoded content of the script. + Script types.String `tfsdk:"script"` +} + +type GlobalInitScriptDetails struct { + // Time when the script was created, represented as a Unix timestamp in + // milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // The username of the user who created the script. + CreatedBy types.String `tfsdk:"created_by"` + // Specifies whether the script is enabled. The script runs only if enabled. 
+ Enabled types.Bool `tfsdk:"enabled"`
+ // The name of the script
+ Name types.String `tfsdk:"name"`
+ // The position of a script, where 0 represents the first script to run, 1
+ // is the second script to run, in ascending order.
+ Position types.Int64 `tfsdk:"position"`
+ // The global init script ID.
+ ScriptId types.String `tfsdk:"script_id"`
+ // Time when the script was updated, represented as a Unix timestamp in
+ // milliseconds.
+ UpdatedAt types.Int64 `tfsdk:"updated_at"`
+ // The username of the user who last updated the script
+ UpdatedBy types.String `tfsdk:"updated_by"`
+}
+
+type GlobalInitScriptDetailsWithContent struct {
+ // Time when the script was created, represented as a Unix timestamp in
+ // milliseconds.
+ CreatedAt types.Int64 `tfsdk:"created_at"`
+ // The username of the user who created the script.
+ CreatedBy types.String `tfsdk:"created_by"`
+ // Specifies whether the script is enabled. The script runs only if enabled.
+ Enabled types.Bool `tfsdk:"enabled"`
+ // The name of the script
+ Name types.String `tfsdk:"name"`
+ // The position of a script, where 0 represents the first script to run, 1
+ // is the second script to run, in ascending order.
+ Position types.Int64 `tfsdk:"position"`
+ // The Base64-encoded content of the script.
+ Script types.String `tfsdk:"script"`
+ // The global init script ID.
+ ScriptId types.String `tfsdk:"script_id"`
+ // Time when the script was updated, represented as a Unix timestamp in
+ // milliseconds.
+ UpdatedAt types.Int64 `tfsdk:"updated_at"`
+ // The username of the user who last updated the script
+ UpdatedBy types.String `tfsdk:"updated_by"`
+}
+
+type GlobalInitScriptUpdateRequest struct {
+ // Specifies whether the script is enabled. The script runs only if enabled.
+ Enabled types.Bool `tfsdk:"enabled"`
+ // The name of the script
+ Name types.String `tfsdk:"name"`
+ // The position of a script, where 0 represents the first script to run, 1
+ // is the second script to run, in ascending order. To move the script to
+ // run first, set its position to 0.
+ //
+ // To move the script to the end, set its position to any value greater than
+ // or equal to the position of the last script. For example, take three
+ // existing scripts with positions 0, 1, and 2. Any position value of 2 or
+ // greater puts the script in the last position (2).
+ //
+ // If an explicit position value conflicts with an existing script, your
+ // request succeeds, but the original script at that position and all later
+ // scripts have their positions incremented by 1.
+ Position types.Int64 `tfsdk:"position"`
+ // The Base64-encoded content of the script.
+ Script types.String `tfsdk:"script"`
+ // The ID of the global init script.
+ ScriptId types.String `tfsdk:"-" url:"-"`
+}
+
+type InitScriptEventDetails struct {
+ // The cluster scoped init scripts associated with this cluster event
+ Cluster []InitScriptInfoAndExecutionDetails `tfsdk:"cluster"`
+ // The global init scripts associated with this cluster event
+ Global []InitScriptInfoAndExecutionDetails `tfsdk:"global"`
+ // The private IP address of the node where the init scripts were run.
+ ReportedForNode types.String `tfsdk:"reported_for_node"`
+}
+
+type InitScriptExecutionDetails struct {
+ // Additional details regarding errors.
+ ErrorMessage types.String `tfsdk:"error_message"`
+ // The duration of the script execution in seconds.
+ ExecutionDurationSeconds types.Int64 `tfsdk:"execution_duration_seconds"` + // The current status of the script + Status InitScriptExecutionDetailsStatus `tfsdk:"status"` +} + +// The current status of the script +type InitScriptExecutionDetailsStatus string + +const InitScriptExecutionDetailsStatusFailedExecution InitScriptExecutionDetailsStatus = `FAILED_EXECUTION` + +const InitScriptExecutionDetailsStatusFailedFetch InitScriptExecutionDetailsStatus = `FAILED_FETCH` + +const InitScriptExecutionDetailsStatusNotExecuted InitScriptExecutionDetailsStatus = `NOT_EXECUTED` + +const InitScriptExecutionDetailsStatusSkipped InitScriptExecutionDetailsStatus = `SKIPPED` + +const InitScriptExecutionDetailsStatusSucceeded InitScriptExecutionDetailsStatus = `SUCCEEDED` + +const InitScriptExecutionDetailsStatusUnknown InitScriptExecutionDetailsStatus = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *InitScriptExecutionDetailsStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *InitScriptExecutionDetailsStatus) Set(v string) error { + switch v { + case `FAILED_EXECUTION`, `FAILED_FETCH`, `NOT_EXECUTED`, `SKIPPED`, `SUCCEEDED`, `UNKNOWN`: + *f = InitScriptExecutionDetailsStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED_EXECUTION", "FAILED_FETCH", "NOT_EXECUTED", "SKIPPED", "SUCCEEDED", "UNKNOWN"`, v) + } +} + +// Type always returns InitScriptExecutionDetailsStatus to satisfy [pflag.Value] interface +func (f *InitScriptExecutionDetailsStatus) Type() string { + return "InitScriptExecutionDetailsStatus" +} + +type InitScriptInfo struct { + // destination needs to be provided. e.g. `{ "abfss" : { "destination" : + // "abfss://@.dfs.core.windows.net/" + // } } + Abfss *Adlsgen2Info `tfsdk:"abfss"` + // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : + // "dbfs:/home/cluster_log" } }` + Dbfs *DbfsStorageInfo `tfsdk:"dbfs"` + // destination needs to be provided. e.g. `{ "file" : { "destination" : + // "file:/my/local/file.sh" } }` + File *LocalFileInfo `tfsdk:"file"` + // destination needs to be provided. e.g. `{ "gcs": { "destination": + // "gs://my-bucket/file.sh" } }` + Gcs *GcsStorageInfo `tfsdk:"gcs"` + // destination and either the region or endpoint need to be provided. e.g. + // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : + // "us-west-2" } }` Cluster iam role is used to access s3, please make sure + // the cluster iam role in `instance_profile_arn` has permission to write + // data to the s3 destination. + S3 *S3StorageInfo `tfsdk:"s3"` + // destination needs to be provided. e.g. `{ "volumes" : { "destination" : + // "/Volumes/my-init.sh" } }` + Volumes *VolumesStorageInfo `tfsdk:"volumes"` + // destination needs to be provided. e.g. `{ "workspace" : { "destination" : + // "/Users/user1@databricks.com/my-init.sh" } }` + Workspace *WorkspaceStorageInfo `tfsdk:"workspace"` +} + +type InitScriptInfoAndExecutionDetails struct { + // Details about the script + ExecutionDetails *InitScriptExecutionDetails `tfsdk:"execution_details"` + // The script + Script *InitScriptInfo `tfsdk:"script"` +} + +type InstallLibraries struct { + // Unique identifier for the cluster on which to install these libraries. + ClusterId types.String `tfsdk:"cluster_id"` + // The libraries to install. 
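+ //
+ // For illustration only (the cluster ID and path below are placeholders, and
+ // the terraform-plugin-framework `types` value constructors are assumed), a
+ // request installing a single wheel might be built as:
+ //
+ //	req := InstallLibraries{
+ //		ClusterId: types.StringValue("0123-456789-abcdefgh"),
+ //		Libraries: []Library{{Whl: types.StringValue("/Workspace/path/to/library.whl")}},
+ //	}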
+ Libraries []Library `tfsdk:"libraries"` +} + +type InstallLibrariesResponse struct { +} + +type InstancePoolAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel InstancePoolPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type InstancePoolAccessControlResponse struct { + // All permissions. + AllPermissions []InstancePoolPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type InstancePoolAndStats struct { + // Attributes related to instance pools running on Amazon Web Services. If + // not specified at pool creation, a set of default values will be used. + AwsAttributes *InstancePoolAwsAttributes `tfsdk:"aws_attributes"` + // Attributes related to instance pools running on Azure. If not specified + // at pool creation, a set of default values will be used. + AzureAttributes *InstancePoolAzureAttributes `tfsdk:"azure_attributes"` + // Additional tags for pool resources. Databricks will tag all pool + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // Tags that are added by Databricks regardless of any `custom_tags`, + // including: + // + // - Vendor: Databricks + // + // - InstancePoolCreator: + // + // - InstancePoolName: + // + // - InstancePoolId: + DefaultTags map[string]types.String `tfsdk:"default_tags"` + // Defines the specification of the disks that will be attached to all spark + // containers. + DiskSpec *DiskSpec `tfsdk:"disk_spec"` + // Autoscaling Local Storage: when enabled, this instances in this pool will + // dynamically acquire additional disk space when its Spark workers are + // running low on disk space. In AWS, this feature requires specific AWS + // permissions to function correctly - refer to the User Guide for more + // details. + EnableElasticDisk types.Bool `tfsdk:"enable_elastic_disk"` + // Attributes related to instance pools running on Google Cloud Platform. If + // not specified at pool creation, a set of default values will be used. + GcpAttributes *InstancePoolGcpAttributes `tfsdk:"gcp_attributes"` + // Automatically terminates the extra instances in the pool cache after they + // are inactive for this time in minutes if min_idle_instances requirement + // is already met. If not set, the extra pool instances will be + // automatically terminated after a default timeout. If specified, the + // threshold must be between 0 and 10000 minutes. Users can also set this + // value to 0 to instantly remove idle instances from the cache if min cache + // size could still hold. + IdleInstanceAutoterminationMinutes types.Int64 `tfsdk:"idle_instance_autotermination_minutes"` + // Canonical unique identifier for the pool. + InstancePoolId types.String `tfsdk:"instance_pool_id"` + // Pool name requested by the user. Pool name must be unique. Length must be + // between 1 and 100 characters. 
+ InstancePoolName types.String `tfsdk:"instance_pool_name"` + // Maximum number of outstanding instances to keep in the pool, including + // both instances used by clusters and idle instances. Clusters that require + // further instance provisioning will fail during upsize requests. + MaxCapacity types.Int64 `tfsdk:"max_capacity"` + // Minimum number of idle instances to keep in the instance pool + MinIdleInstances types.Int64 `tfsdk:"min_idle_instances"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` + // Custom Docker Image BYOC + PreloadedDockerImages []DockerImage `tfsdk:"preloaded_docker_images"` + // A list containing at most one preloaded Spark image version for the pool. + // Pool-backed clusters started with the preloaded Spark version will start + // faster. A list of available Spark versions can be retrieved by using the + // :method:clusters/sparkVersions API call. + PreloadedSparkVersions []types.String `tfsdk:"preloaded_spark_versions"` + // Current state of the instance pool. + State InstancePoolState `tfsdk:"state"` + // Usage statistics about the instance pool. + Stats *InstancePoolStats `tfsdk:"stats"` + // Status of failed pending instances in the pool. + Status *InstancePoolStatus `tfsdk:"status"` +} + +type InstancePoolAwsAttributes struct { + // Availability type used for the spot nodes. + // + // The default value is defined by + // InstancePoolConf.instancePoolDefaultAwsAvailability + Availability InstancePoolAwsAttributesAvailability `tfsdk:"availability"` + // Calculates the bid price for AWS spot instances, as a percentage of the + // corresponding instance type's on-demand price. For example, if this field + // is set to 50, and the cluster needs a new `r3.xlarge` spot instance, then + // the bid price is half of the price of on-demand `r3.xlarge` instances. + // Similarly, if this field is set to 200, the bid price is twice the price + // of on-demand `r3.xlarge` instances. If not specified, the default value + // is 100. When spot instances are requested for this cluster, only spot + // instances whose bid price percentage matches this field will be + // considered. Note that, for safety, we enforce this field to be no more + // than 10000. + // + // The default value and documentation here should be kept consistent with + // CommonConf.defaultSpotBidPricePercent and + // CommonConf.maxSpotBidPricePercent. + SpotBidPricePercent types.Int64 `tfsdk:"spot_bid_price_percent"` + // Identifier for the availability zone/datacenter in which the cluster + // resides. This string will be of a form like "us-west-2a". The provided + // availability zone must be in the same region as the Databricks + // deployment. For example, "us-west-2a" is not a valid zone id if the + // Databricks deployment resides in the "us-east-1" region. This is an + // optional field at cluster creation, and if not specified, a default zone + // will be used. The list of available zones as well as the default value + // can be found by using the `List Zones` method. + ZoneId types.String `tfsdk:"zone_id"` +} + +// Availability type used for the spot nodes. 
+// +// The default value is defined by +// InstancePoolConf.instancePoolDefaultAwsAvailability +type InstancePoolAwsAttributesAvailability string + +const InstancePoolAwsAttributesAvailabilityOnDemand InstancePoolAwsAttributesAvailability = `ON_DEMAND` + +const InstancePoolAwsAttributesAvailabilitySpot InstancePoolAwsAttributesAvailability = `SPOT` + +// String representation for [fmt.Print] +func (f *InstancePoolAwsAttributesAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *InstancePoolAwsAttributesAvailability) Set(v string) error { + switch v { + case `ON_DEMAND`, `SPOT`: + *f = InstancePoolAwsAttributesAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND", "SPOT"`, v) + } +} + +// Type always returns InstancePoolAwsAttributesAvailability to satisfy [pflag.Value] interface +func (f *InstancePoolAwsAttributesAvailability) Type() string { + return "InstancePoolAwsAttributesAvailability" +} + +type InstancePoolAzureAttributes struct { + // Shows the Availability type used for the spot nodes. + // + // The default value is defined by + // InstancePoolConf.instancePoolDefaultAzureAvailability + Availability InstancePoolAzureAttributesAvailability `tfsdk:"availability"` + // The default value and documentation here should be kept consistent with + // CommonConf.defaultSpotBidMaxPrice. + SpotBidMaxPrice types.Float64 `tfsdk:"spot_bid_max_price"` +} + +// Shows the Availability type used for the spot nodes. +// +// The default value is defined by +// InstancePoolConf.instancePoolDefaultAzureAvailability +type InstancePoolAzureAttributesAvailability string + +const InstancePoolAzureAttributesAvailabilityOnDemandAzure InstancePoolAzureAttributesAvailability = `ON_DEMAND_AZURE` + +const InstancePoolAzureAttributesAvailabilitySpotAzure InstancePoolAzureAttributesAvailability = `SPOT_AZURE` + +// String representation for [fmt.Print] +func (f *InstancePoolAzureAttributesAvailability) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *InstancePoolAzureAttributesAvailability) Set(v string) error { + switch v { + case `ON_DEMAND_AZURE`, `SPOT_AZURE`: + *f = InstancePoolAzureAttributesAvailability(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ON_DEMAND_AZURE", "SPOT_AZURE"`, v) + } +} + +// Type always returns InstancePoolAzureAttributesAvailability to satisfy [pflag.Value] interface +func (f *InstancePoolAzureAttributesAvailability) Type() string { + return "InstancePoolAzureAttributesAvailability" +} + +type InstancePoolGcpAttributes struct { + // This field determines whether the instance pool will contain preemptible + // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs + // if the former is unavailable. + GcpAvailability GcpAvailability `tfsdk:"gcp_availability"` + // If provided, each node in the instance pool will have this number of + // local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP + // documentation] for the supported number of local SSDs for each instance + // type. + // + // [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds + LocalSsdCount types.Int64 `tfsdk:"local_ssd_count"` + // Identifier for the availability zone/datacenter in which the cluster + // resides. This string will be of a form like "us-west1-a". 
The provided + // availability zone must be in the same region as the Databricks workspace. + // For example, "us-west1-a" is not a valid zone id if the Databricks + // workspace resides in the "us-east1" region. This is an optional field at + // instance pool creation, and if not specified, a default zone will be + // used. + // + // This field can be one of the following: - "HA" => High availability, + // spread nodes across availability zones for a Databricks deployment region + // - A GCP availability zone => Pick One of the available zones for (machine + // type + region) from https://cloud.google.com/compute/docs/regions-zones + // (e.g. "us-west1-a"). + // + // If empty, Databricks picks an availability zone to schedule the cluster + // on. + ZoneId types.String `tfsdk:"zone_id"` +} + +type InstancePoolPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel InstancePoolPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type InstancePoolPermissionLevel string + +const InstancePoolPermissionLevelCanAttachTo InstancePoolPermissionLevel = `CAN_ATTACH_TO` + +const InstancePoolPermissionLevelCanManage InstancePoolPermissionLevel = `CAN_MANAGE` + +// String representation for [fmt.Print] +func (f *InstancePoolPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *InstancePoolPermissionLevel) Set(v string) error { + switch v { + case `CAN_ATTACH_TO`, `CAN_MANAGE`: + *f = InstancePoolPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_ATTACH_TO", "CAN_MANAGE"`, v) + } +} + +// Type always returns InstancePoolPermissionLevel to satisfy [pflag.Value] interface +func (f *InstancePoolPermissionLevel) Type() string { + return "InstancePoolPermissionLevel" +} + +type InstancePoolPermissions struct { + AccessControlList []InstancePoolAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type InstancePoolPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel InstancePoolPermissionLevel `tfsdk:"permission_level"` +} + +type InstancePoolPermissionsRequest struct { + AccessControlList []InstancePoolAccessControlRequest `tfsdk:"access_control_list"` + // The instance pool for which to get or manage permissions. + InstancePoolId types.String `tfsdk:"-" url:"-"` +} + +// Current state of the instance pool. 
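+//
+// For illustration only (pool is a hypothetical GetInstancePool value held by
+// the caller), code typically branches on the state, e.g.
+//
+//	switch pool.State {
+//	case InstancePoolStateActive:
+//		// the pool can back new clusters
+//	case InstancePoolStateStopped, InstancePoolStateDeleted:
+//		// the pool cannot be used
+//	}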
+type InstancePoolState string
+
+const InstancePoolStateActive InstancePoolState = `ACTIVE`
+
+const InstancePoolStateDeleted InstancePoolState = `DELETED`
+
+const InstancePoolStateStopped InstancePoolState = `STOPPED`
+
+// String representation for [fmt.Print]
+func (f *InstancePoolState) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *InstancePoolState) Set(v string) error {
+ switch v {
+ case `ACTIVE`, `DELETED`, `STOPPED`:
+ *f = InstancePoolState(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DELETED", "STOPPED"`, v)
+ }
+}
+
+// Type always returns InstancePoolState to satisfy [pflag.Value] interface
+func (f *InstancePoolState) Type() string {
+ return "InstancePoolState"
+}
+
+type InstancePoolStats struct {
+ // Number of active instances in the pool that are NOT part of a cluster.
+ IdleCount types.Int64 `tfsdk:"idle_count"`
+ // Number of pending instances in the pool that are NOT part of a cluster.
+ PendingIdleCount types.Int64 `tfsdk:"pending_idle_count"`
+ // Number of pending instances in the pool that are part of a cluster.
+ PendingUsedCount types.Int64 `tfsdk:"pending_used_count"`
+ // Number of active instances in the pool that are part of a cluster.
+ UsedCount types.Int64 `tfsdk:"used_count"`
+}
+
+type InstancePoolStatus struct {
+ // List of error messages for the failed pending instances. The
+ // pending_instance_errors follows FIFO with maximum length of the min_idle
+ // of the pool. The pending_instance_errors is emptied once the number of
+ // existing available instances reaches the min_idle of the pool.
+ PendingInstanceErrors []PendingInstanceError `tfsdk:"pending_instance_errors"`
+}
+
+type InstanceProfile struct {
+ // The AWS IAM role ARN of the role associated with the instance profile.
+ // This field is required if your role name and instance profile name do not
+ // match and you want to use the instance profile with [Databricks SQL
+ // Serverless].
+ //
+ // Otherwise, this field is optional.
+ //
+ // [Databricks SQL Serverless]: https://docs.databricks.com/sql/admin/serverless.html
+ IamRoleArn types.String `tfsdk:"iam_role_arn"`
+ // The AWS ARN of the instance profile to register with Databricks. This
+ // field is required.
+ InstanceProfileArn types.String `tfsdk:"instance_profile_arn"`
+ // Boolean flag indicating whether the instance profile should only be used
+ // in credential passthrough scenarios. If true, it means the instance
+ // profile contains a meta IAM role which could assume a wide range of
+ // roles. Therefore it should always be used with authorization. This field
+ // is optional; the default value is `false`.
+ IsMetaInstanceProfile types.Bool `tfsdk:"is_meta_instance_profile"` +} + +type Language string + +const LanguagePython Language = `python` + +const LanguageScala Language = `scala` + +const LanguageSql Language = `sql` + +// String representation for [fmt.Print] +func (f *Language) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Language) Set(v string) error { + switch v { + case `python`, `scala`, `sql`: + *f = Language(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "python", "scala", "sql"`, v) + } +} + +// Type always returns Language to satisfy [pflag.Value] interface +func (f *Language) Type() string { + return "Language" +} + +type Library struct { + // Specification of a CRAN library to be installed as part of the library + Cran *RCranLibrary `tfsdk:"cran"` + // URI of the egg library to install. Supported URIs include Workspace + // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "egg": + // "/Workspace/path/to/library.egg" }`, `{ "egg" : + // "/Volumes/path/to/library.egg" }` or `{ "egg": + // "s3://my-bucket/library.egg" }`. If S3 is used, please make sure the + // cluster has read access on the library. You may need to launch the + // cluster with an IAM role to access the S3 URI. + Egg types.String `tfsdk:"egg"` + // URI of the JAR library to install. Supported URIs include Workspace + // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "jar": + // "/Workspace/path/to/library.jar" }`, `{ "jar" : + // "/Volumes/path/to/library.jar" }` or `{ "jar": + // "s3://my-bucket/library.jar" }`. If S3 is used, please make sure the + // cluster has read access on the library. You may need to launch the + // cluster with an IAM role to access the S3 URI. + Jar types.String `tfsdk:"jar"` + // Specification of a maven library to be installed. For example: `{ + // "coordinates": "org.jsoup:jsoup:1.7.2" }` + Maven *MavenLibrary `tfsdk:"maven"` + // Specification of a PyPi library to be installed. For example: `{ + // "package": "simplejson" }` + Pypi *PythonPyPiLibrary `tfsdk:"pypi"` + // URI of the requirements.txt file to install. Only Workspace paths and + // Unity Catalog Volumes paths are supported. For example: `{ + // "requirements": "/Workspace/path/to/requirements.txt" }` or `{ + // "requirements" : "/Volumes/path/to/requirements.txt" }` + Requirements types.String `tfsdk:"requirements"` + // URI of the wheel library to install. Supported URIs include Workspace + // paths, Unity Catalog Volumes paths, and S3 URIs. For example: `{ "whl": + // "/Workspace/path/to/library.whl" }`, `{ "whl" : + // "/Volumes/path/to/library.whl" }` or `{ "whl": + // "s3://my-bucket/library.whl" }`. If S3 is used, please make sure the + // cluster has read access on the library. You may need to launch the + // cluster with an IAM role to access the S3 URI. + Whl types.String `tfsdk:"whl"` +} + +// The status of the library on a specific cluster. +type LibraryFullStatus struct { + // Whether the library was set to be installed on all clusters via the + // libraries UI. + IsLibraryForAllClusters types.Bool `tfsdk:"is_library_for_all_clusters"` + // Unique identifier for the library. + Library *Library `tfsdk:"library"` + // All the info and warning messages that have occurred so far for this + // library. + Messages []types.String `tfsdk:"messages"` + // Status of installing the library on the cluster. 
+ Status LibraryInstallStatus `tfsdk:"status"` +} + +// The status of a library on a specific cluster. +type LibraryInstallStatus string + +const LibraryInstallStatusFailed LibraryInstallStatus = `FAILED` + +const LibraryInstallStatusInstalled LibraryInstallStatus = `INSTALLED` + +const LibraryInstallStatusInstalling LibraryInstallStatus = `INSTALLING` + +const LibraryInstallStatusPending LibraryInstallStatus = `PENDING` + +const LibraryInstallStatusResolving LibraryInstallStatus = `RESOLVING` + +const LibraryInstallStatusRestored LibraryInstallStatus = `RESTORED` + +const LibraryInstallStatusSkipped LibraryInstallStatus = `SKIPPED` + +const LibraryInstallStatusUninstallOnRestart LibraryInstallStatus = `UNINSTALL_ON_RESTART` + +// String representation for [fmt.Print] +func (f *LibraryInstallStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *LibraryInstallStatus) Set(v string) error { + switch v { + case `FAILED`, `INSTALLED`, `INSTALLING`, `PENDING`, `RESOLVING`, `RESTORED`, `SKIPPED`, `UNINSTALL_ON_RESTART`: + *f = LibraryInstallStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED", "INSTALLED", "INSTALLING", "PENDING", "RESOLVING", "RESTORED", "SKIPPED", "UNINSTALL_ON_RESTART"`, v) + } +} + +// Type always returns LibraryInstallStatus to satisfy [pflag.Value] interface +func (f *LibraryInstallStatus) Type() string { + return "LibraryInstallStatus" +} + +type ListAllClusterLibraryStatusesResponse struct { + // A list of cluster statuses. + Statuses []ClusterLibraryStatuses `tfsdk:"statuses"` +} + +type ListAvailableZonesResponse struct { + // The availability zone if no `zone_id` is provided in the cluster creation + // request. + DefaultZone types.String `tfsdk:"default_zone"` + // The list of available zones (e.g., ['us-west-2c', 'us-east-2']). + Zones []types.String `tfsdk:"zones"` +} + +// List cluster policies +type ListClusterPoliciesRequest struct { + // The cluster policy attribute to sort by. * `POLICY_CREATION_TIME` - Sort + // result list by policy creation time. * `POLICY_NAME` - Sort result list + // by policy name. + SortColumn ListSortColumn `tfsdk:"-" url:"sort_column,omitempty"` + // The order in which the policies get listed. * `DESC` - Sort result list + // in descending order. * `ASC` - Sort result list in ascending order. + SortOrder ListSortOrder `tfsdk:"-" url:"sort_order,omitempty"` +} + +// List all clusters +type ListClustersRequest struct { + // Filter clusters based on what type of client it can be used for. Could be + // either NOTEBOOKS or JOBS. No input for this field will get all clusters + // in the workspace without filtering on its supported client + CanUseClient types.String `tfsdk:"-" url:"can_use_client,omitempty"` +} + +type ListClustersResponse struct { + // + Clusters []ClusterDetails `tfsdk:"clusters"` +} + +type ListGlobalInitScriptsResponse struct { + Scripts []GlobalInitScriptDetails `tfsdk:"scripts"` +} + +type ListInstancePools struct { + InstancePools []InstancePoolAndStats `tfsdk:"instance_pools"` +} + +type ListInstanceProfilesResponse struct { + // A list of instance profiles that the user can access. + InstanceProfiles []InstanceProfile `tfsdk:"instance_profiles"` +} + +type ListNodeTypesResponse struct { + // The list of available Spark node types. + NodeTypes []NodeType `tfsdk:"node_types"` +} + +type ListPoliciesResponse struct { + // List of policies. 
+ Policies []Policy `tfsdk:"policies"` +} + +// List policy families +type ListPolicyFamiliesRequest struct { + // The max number of policy families to return. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // A token that can be used to get the next page of results. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListPolicyFamiliesResponse struct { + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken types.String `tfsdk:"next_page_token"` + // List of policy families. + PolicyFamilies []PolicyFamily `tfsdk:"policy_families"` +} + +type ListSortColumn string + +const ListSortColumnPolicyCreationTime ListSortColumn = `POLICY_CREATION_TIME` + +const ListSortColumnPolicyName ListSortColumn = `POLICY_NAME` + +// String representation for [fmt.Print] +func (f *ListSortColumn) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListSortColumn) Set(v string) error { + switch v { + case `POLICY_CREATION_TIME`, `POLICY_NAME`: + *f = ListSortColumn(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "POLICY_CREATION_TIME", "POLICY_NAME"`, v) + } +} + +// Type always returns ListSortColumn to satisfy [pflag.Value] interface +func (f *ListSortColumn) Type() string { + return "ListSortColumn" +} + +type ListSortOrder string + +const ListSortOrderAsc ListSortOrder = `ASC` + +const ListSortOrderDesc ListSortOrder = `DESC` + +// String representation for [fmt.Print] +func (f *ListSortOrder) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListSortOrder) Set(v string) error { + switch v { + case `ASC`, `DESC`: + *f = ListSortOrder(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ASC", "DESC"`, v) + } +} + +// Type always returns ListSortOrder to satisfy [pflag.Value] interface +func (f *ListSortOrder) Type() string { + return "ListSortOrder" +} + +type LocalFileInfo struct { + // local file destination, e.g. `file:/my/local/file.sh` + Destination types.String `tfsdk:"destination"` +} + +type LogAnalyticsInfo struct { + // + LogAnalyticsPrimaryKey types.String `tfsdk:"log_analytics_primary_key"` + // + LogAnalyticsWorkspaceId types.String `tfsdk:"log_analytics_workspace_id"` +} + +type LogSyncStatus struct { + // The timestamp of last attempt. If the last attempt fails, + // `last_exception` will contain the exception in the last attempt. + LastAttempted types.Int64 `tfsdk:"last_attempted"` + // The exception thrown in the last attempt, it would be null (omitted in + // the response) if there is no exception in last attempted. + LastException types.String `tfsdk:"last_exception"` +} + +type MavenLibrary struct { + // Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". + Coordinates types.String `tfsdk:"coordinates"` + // List of dependences to exclude. For example: `["slf4j:slf4j", + // "*:hadoop-client"]`. + // + // Maven dependency exclusions: + // https://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html. + Exclusions []types.String `tfsdk:"exclusions"` + // Maven repo to install the Maven package from. If omitted, both Maven + // Central Repository and Spark Packages are searched. 
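The MaxResults/PageToken/NextPageToken trio above follows the usual page-token pagination shape. A sketch of draining it; the fetch callback stands in for whatever client call the provider wires up and is not part of this patch.

    package example

    import (
    	"github.com/databricks/terraform-provider-databricks/service/compute_tf"
    	"github.com/hashicorp/terraform-plugin-framework/types"
    )

    // listAllPolicyFamilies keeps requesting pages until no next_page_token is
    // returned, accumulating every PolicyFamily along the way.
    func listAllPolicyFamilies(
    	fetch func(compute_tf.ListPolicyFamiliesRequest) (compute_tf.ListPolicyFamiliesResponse, error),
    ) ([]compute_tf.PolicyFamily, error) {
    	req := compute_tf.ListPolicyFamiliesRequest{MaxResults: types.Int64Value(100)}
    	var all []compute_tf.PolicyFamily
    	for {
    		resp, err := fetch(req)
    		if err != nil {
    			return nil, err
    		}
    		all = append(all, resp.PolicyFamilies...)
    		if resp.NextPageToken.ValueString() == "" {
    			return all, nil
    		}
    		req.PageToken = resp.NextPageToken
    	}
    }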
+ Repo types.String `tfsdk:"repo"` +} + +type NodeInstanceType struct { + InstanceTypeId types.String `tfsdk:"instance_type_id"` + + LocalDiskSizeGb types.Int64 `tfsdk:"local_disk_size_gb"` + + LocalDisks types.Int64 `tfsdk:"local_disks"` + + LocalNvmeDiskSizeGb types.Int64 `tfsdk:"local_nvme_disk_size_gb"` + + LocalNvmeDisks types.Int64 `tfsdk:"local_nvme_disks"` +} + +type NodeType struct { + Category types.String `tfsdk:"category"` + // A string description associated with this node type, e.g., "r3.xlarge". + Description types.String `tfsdk:"description"` + + DisplayOrder types.Int64 `tfsdk:"display_order"` + // An identifier for the type of hardware that this node runs on, e.g., + // "r3.2xlarge" in AWS. + InstanceTypeId types.String `tfsdk:"instance_type_id"` + // Whether the node type is deprecated. Non-deprecated node types offer + // greater performance. + IsDeprecated types.Bool `tfsdk:"is_deprecated"` + // AWS specific, whether this instance supports encryption in transit, used + // for hipaa and pci workloads. + IsEncryptedInTransit types.Bool `tfsdk:"is_encrypted_in_transit"` + + IsGraviton types.Bool `tfsdk:"is_graviton"` + + IsHidden types.Bool `tfsdk:"is_hidden"` + + IsIoCacheEnabled types.Bool `tfsdk:"is_io_cache_enabled"` + // Memory (in MB) available for this node type. + MemoryMb types.Int64 `tfsdk:"memory_mb"` + + NodeInfo *CloudProviderNodeInfo `tfsdk:"node_info"` + + NodeInstanceType *NodeInstanceType `tfsdk:"node_instance_type"` + // Unique identifier for this node type. + NodeTypeId types.String `tfsdk:"node_type_id"` + // Number of CPU cores available for this node type. Note that this can be + // fractional, e.g., 2.5 cores, if the the number of cores on a machine + // instance is not divisible by the number of Spark nodes on that machine. + NumCores types.Float64 `tfsdk:"num_cores"` + + NumGpus types.Int64 `tfsdk:"num_gpus"` + + PhotonDriverCapable types.Bool `tfsdk:"photon_driver_capable"` + + PhotonWorkerCapable types.Bool `tfsdk:"photon_worker_capable"` + + SupportClusterTags types.Bool `tfsdk:"support_cluster_tags"` + + SupportEbsVolumes types.Bool `tfsdk:"support_ebs_volumes"` + + SupportPortForwarding types.Bool `tfsdk:"support_port_forwarding"` + // Indicates if this node type can be used for an instance pool or cluster + // with elastic disk enabled. This is true for most node types. + SupportsElasticDisk types.Bool `tfsdk:"supports_elastic_disk"` +} + +type PendingInstanceError struct { + InstanceId types.String `tfsdk:"instance_id"` + + Message types.String `tfsdk:"message"` +} + +type PermanentDeleteCluster struct { + // The cluster to be deleted. + ClusterId types.String `tfsdk:"cluster_id"` +} + +type PermanentDeleteClusterResponse struct { +} + +type PinCluster struct { + // + ClusterId types.String `tfsdk:"cluster_id"` +} + +type PinClusterResponse struct { +} + +type Policy struct { + // Creation time. The timestamp (in millisecond) when this Cluster Policy + // was created. + CreatedAtTimestamp types.Int64 `tfsdk:"created_at_timestamp"` + // Creator user name. The field won't be included in the response if the + // user has already been deleted. + CreatorUserName types.String `tfsdk:"creator_user_name"` + // Policy definition document expressed in [Databricks Cluster Policy + // Definition Language]. 
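These *_tf structs are meant to convert cleanly into their go-sdk counterparts, which is why lists and maps stay go-native. A hand-written sketch of one such conversion; compute.PinCluster with a plain string ClusterId is assumed to exist in databricks-sdk-go and is not defined in this patch.

    package example

    import (
    	"github.com/databricks/databricks-sdk-go/service/compute"
    	"github.com/databricks/terraform-provider-databricks/service/compute_tf"
    )

    // pinClusterToSdk copies the terraform state struct into the request type
    // the go-sdk client expects, unwrapping the tfsdk value types.
    func pinClusterToSdk(in compute_tf.PinCluster) compute.PinCluster {
    	return compute.PinCluster{
    		ClusterId: in.ClusterId.ValueString(),
    	}
    }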
+ // + // [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + Definition types.String `tfsdk:"definition"` + // Additional human-readable description of the cluster policy. + Description types.String `tfsdk:"description"` + // If true, policy is a default policy created and managed by Databricks. + // Default policies cannot be deleted, and their policy families cannot be + // changed. + IsDefault types.Bool `tfsdk:"is_default"` + // A list of libraries to be installed on the next cluster restart that uses + // this policy. The maximum number of libraries is 500. + Libraries []Library `tfsdk:"libraries"` + // Max number of clusters per user that can be active using this policy. If + // not present, there is no max limit. + MaxClustersPerUser types.Int64 `tfsdk:"max_clusters_per_user"` + // Cluster Policy name requested by the user. This has to be unique. Length + // must be between 1 and 100 characters. + Name types.String `tfsdk:"name"` + // Policy definition JSON document expressed in [Databricks Policy + // Definition Language]. The JSON document must be passed as a string and + // cannot be embedded in the requests. + // + // You can use this to customize the policy definition inherited from the + // policy family. Policy rules specified here are merged into the inherited + // policy definition. + // + // [Databricks Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + PolicyFamilyDefinitionOverrides types.String `tfsdk:"policy_family_definition_overrides"` + // ID of the policy family. + PolicyFamilyId types.String `tfsdk:"policy_family_id"` + // Canonical unique identifier for the Cluster Policy. + PolicyId types.String `tfsdk:"policy_id"` +} + +type PolicyFamily struct { + // Policy definition document expressed in [Databricks Cluster Policy + // Definition Language]. + // + // [Databricks Cluster Policy Definition Language]: https://docs.databricks.com/administration-guide/clusters/policy-definition.html + Definition types.String `tfsdk:"definition"` + // Human-readable description of the purpose of the policy family. + Description types.String `tfsdk:"description"` + // Name of the policy family. + Name types.String `tfsdk:"name"` + // ID of the policy family. + PolicyFamilyId types.String `tfsdk:"policy_family_id"` +} + +type PythonPyPiLibrary struct { + // The name of the pypi package to install. An optional exact version + // specification is also supported. Examples: "simplejson" and + // "simplejson==3.8.0". + Package types.String `tfsdk:"package"` + // The repository where the package can be found. If not specified, the + // default pip index is used. + Repo types.String `tfsdk:"repo"` +} + +type RCranLibrary struct { + // The name of the CRAN package to install. + Package types.String `tfsdk:"package"` + // The repository where the package can be found. If not specified, the + // default CRAN repo is used. + Repo types.String `tfsdk:"repo"` +} + +type RemoveInstanceProfile struct { + // The ARN of the instance profile to remove. This field is required. + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` +} + +type RemoveResponse struct { +} + +type ResizeCluster struct { + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *AutoScale `tfsdk:"autoscale"` + // The cluster to be resized. 
+ ClusterId types.String `tfsdk:"cluster_id"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers types.Int64 `tfsdk:"num_workers"` +} + +type ResizeClusterResponse struct { +} + +type RestartCluster struct { + // The cluster to be started. + ClusterId types.String `tfsdk:"cluster_id"` + // + RestartUser types.String `tfsdk:"restart_user"` +} + +type RestartClusterResponse struct { +} + +type ResultType string + +const ResultTypeError ResultType = `error` + +const ResultTypeImage ResultType = `image` + +const ResultTypeImages ResultType = `images` + +const ResultTypeTable ResultType = `table` + +const ResultTypeText ResultType = `text` + +// String representation for [fmt.Print] +func (f *ResultType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ResultType) Set(v string) error { + switch v { + case `error`, `image`, `images`, `table`, `text`: + *f = ResultType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "error", "image", "images", "table", "text"`, v) + } +} + +// Type always returns ResultType to satisfy [pflag.Value] interface +func (f *ResultType) Type() string { + return "ResultType" +} + +type Results struct { + // The cause of the error + Cause types.String `tfsdk:"cause"` + + Data any `tfsdk:"data"` + // The image filename + FileName types.String `tfsdk:"fileName"` + + FileNames []types.String `tfsdk:"fileNames"` + // true if a JSON schema is returned instead of a string representation of + // the Hive type. + IsJsonSchema types.Bool `tfsdk:"isJsonSchema"` + // internal field used by SDK + Pos types.Int64 `tfsdk:"pos"` + + ResultType ResultType `tfsdk:"resultType"` + // The table schema + Schema []map[string]any `tfsdk:"schema"` + // The summary of the error + Summary types.String `tfsdk:"summary"` + // true if partial results are returned. + Truncated types.Bool `tfsdk:"truncated"` +} + +// Decides which runtime engine to be use, e.g. Standard vs. Photon. If +// unspecified, the runtime engine is inferred from spark_version. +type RuntimeEngine string + +const RuntimeEngineNull RuntimeEngine = `NULL` + +const RuntimeEnginePhoton RuntimeEngine = `PHOTON` + +const RuntimeEngineStandard RuntimeEngine = `STANDARD` + +// String representation for [fmt.Print] +func (f *RuntimeEngine) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RuntimeEngine) Set(v string) error { + switch v { + case `NULL`, `PHOTON`, `STANDARD`: + *f = RuntimeEngine(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NULL", "PHOTON", "STANDARD"`, v) + } +} + +// Type always returns RuntimeEngine to satisfy [pflag.Value] interface +func (f *RuntimeEngine) Type() string { + return "RuntimeEngine" +} + +type S3StorageInfo struct { + // (Optional) Set canned access control list for the logs, e.g. + // `bucket-owner-full-control`. 
If `canned_cal` is set, please make sure the + // cluster iam role has `s3:PutObjectAcl` permission on the destination + // bucket and prefix. The full list of possible canned acl can be found at + // http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl. + // Please also note that by default only the object owner gets full + // controls. If you are using cross account role for writing data, you may + // want to set `bucket-owner-full-control` to make bucket owner able to read + // the logs. + CannedAcl types.String `tfsdk:"canned_acl"` + // S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be + // delivered using cluster iam role, please make sure you set cluster iam + // role and the role has write access to the destination. Please also note + // that you cannot use AWS keys to deliver logs. + Destination types.String `tfsdk:"destination"` + // (Optional) Flag to enable server side encryption, `false` by default. + EnableEncryption types.Bool `tfsdk:"enable_encryption"` + // (Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It + // will be used only when encryption is enabled and the default type is + // `sse-s3`. + EncryptionType types.String `tfsdk:"encryption_type"` + // S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or + // endpoint needs to be set. If both are set, endpoint will be used. + Endpoint types.String `tfsdk:"endpoint"` + // (Optional) Kms key which will be used if encryption is enabled and + // encryption type is set to `sse-kms`. + KmsKey types.String `tfsdk:"kms_key"` + // S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. + // If both are set, endpoint will be used. + Region types.String `tfsdk:"region"` +} + +type SparkNode struct { + // The private IP address of the host instance. + HostPrivateIp types.String `tfsdk:"host_private_ip"` + // Globally unique identifier for the host instance from the cloud provider. + InstanceId types.String `tfsdk:"instance_id"` + // Attributes specific to AWS for a Spark node. + NodeAwsAttributes *SparkNodeAwsAttributes `tfsdk:"node_aws_attributes"` + // Globally unique identifier for this node. + NodeId types.String `tfsdk:"node_id"` + // Private IP address (typically a 10.x.x.x address) of the Spark node. Note + // that this is different from the private IP address of the host instance. + PrivateIp types.String `tfsdk:"private_ip"` + // Public DNS address of this node. This address can be used to access the + // Spark JDBC server on the driver node. To communicate with the JDBC + // server, traffic must be manually authorized by adding security group + // rules to the "worker-unmanaged" security group via the AWS console. + // + // Actually it's the public DNS address of the host instance. + PublicDns types.String `tfsdk:"public_dns"` + // The timestamp (in millisecond) when the Spark node is launched. + // + // The start_timestamp is set right before the container is being launched. + // The timestamp when the container is placed on the ResourceManager, before + // its launch and setup by the NodeDaemon. This timestamp is the same as the + // creation timestamp in the database. + StartTimestamp types.Int64 `tfsdk:"start_timestamp"` +} + +type SparkNodeAwsAttributes struct { + // Whether this node is on an Amazon spot instance. + IsSpot types.Bool `tfsdk:"is_spot"` +} + +type SparkVersion struct { + // Spark version key, for example "2.1.x-scala2.11". 
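A small example of the S3StorageInfo block above configured for cluster log delivery, echoing its field comments; the bucket, prefix, and region are placeholders.

    package example

    import (
    	"github.com/databricks/terraform-provider-databricks/service/compute_tf"
    	"github.com/hashicorp/terraform-plugin-framework/types"
    )

    // s3LogDelivery enables default server-side encryption and grants the
    // bucket owner full control, as the CannedAcl comment suggests for
    // cross-account delivery.
    func s3LogDelivery() compute_tf.S3StorageInfo {
    	return compute_tf.S3StorageInfo{
    		Destination:      types.StringValue("s3://my-bucket/cluster-logs"),
    		Region:           types.StringValue("us-west-2"),
    		EnableEncryption: types.BoolValue(true),
    		EncryptionType:   types.StringValue("sse-s3"),
    		CannedAcl:        types.StringValue("bucket-owner-full-control"),
    	}
    }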
This is the value which + // should be provided as the "spark_version" when creating a new cluster. + // Note that the exact Spark version may change over time for a "wildcard" + // version (i.e., "2.1.x-scala2.11" is a "wildcard" version) with minor bug + // fixes. + Key types.String `tfsdk:"key"` + // A descriptive name for this Spark version, for example "Spark 2.1". + Name types.String `tfsdk:"name"` +} + +type StartCluster struct { + // The cluster to be started. + ClusterId types.String `tfsdk:"cluster_id"` +} + +type StartClusterResponse struct { +} + +// Current state of the cluster. +type State string + +const StateError State = `ERROR` + +const StatePending State = `PENDING` + +const StateResizing State = `RESIZING` + +const StateRestarting State = `RESTARTING` + +const StateRunning State = `RUNNING` + +const StateTerminated State = `TERMINATED` + +const StateTerminating State = `TERMINATING` + +const StateUnknown State = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *State) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *State) Set(v string) error { + switch v { + case `ERROR`, `PENDING`, `RESIZING`, `RESTARTING`, `RUNNING`, `TERMINATED`, `TERMINATING`, `UNKNOWN`: + *f = State(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ERROR", "PENDING", "RESIZING", "RESTARTING", "RUNNING", "TERMINATED", "TERMINATING", "UNKNOWN"`, v) + } +} + +// Type always returns State to satisfy [pflag.Value] interface +func (f *State) Type() string { + return "State" +} + +type TerminationReason struct { + // status code indicating why the cluster was terminated + Code TerminationReasonCode `tfsdk:"code"` + // list of parameters that provide additional information about why the + // cluster was terminated + Parameters map[string]types.String `tfsdk:"parameters"` + // type of the termination + Type TerminationReasonType `tfsdk:"type"` +} + +// status code indicating why the cluster was terminated +type TerminationReasonCode string + +const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED` + +const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE` + +const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE` + +const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE` + +const TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE` + +const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE` + +const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED` + +const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE` + +const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE` + +const TerminationReasonCodeAzureEphemeralDiskFailure TerminationReasonCode = `AZURE_EPHEMERAL_DISK_FAILURE` + +const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode = `AZURE_INVALID_DEPLOYMENT_TEMPLATE` + +const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION` + +const TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = 
`AZURE_QUOTA_EXCEEDED_EXCEPTION` + +const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING` + +const TerminationReasonCodeAzureResourceProviderThrottling TerminationReasonCode = `AZURE_RESOURCE_PROVIDER_THROTTLING` + +const TerminationReasonCodeAzureUnexpectedDeploymentTemplateFailure TerminationReasonCode = `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE` + +const TerminationReasonCodeAzureVmExtensionFailure TerminationReasonCode = `AZURE_VM_EXTENSION_FAILURE` + +const TerminationReasonCodeAzureVnetConfigurationFailure TerminationReasonCode = `AZURE_VNET_CONFIGURATION_FAILURE` + +const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_TIMEOUT` + +const TerminationReasonCodeBootstrapTimeoutCloudProviderException TerminationReasonCode = `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION` + +const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE` + +const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE` + +const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT` + +const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN` + +const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST` + +const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE` + +const TerminationReasonCodeControlPlaneRequestFailure TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE` + +const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE` + +const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` + +const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE` + +const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE` + +const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED` + +const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED` + +const TerminationReasonCodeGlobalInitScriptFailure TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE` + +const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE` + +const TerminationReasonCodeImagePullPermissionDenied TerminationReasonCode = `IMAGE_PULL_PERMISSION_DENIED` + +const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY` + +const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE` + +const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE` + +const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE` + +const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR` + +const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT` + +const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE` + +const TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE` + 
+const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED` + +const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AUTOSCALING_FAILURE` + +const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT` + +const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT` + +const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE` + +const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` + +const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE` + +const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE` + +const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED` + +const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED` + +const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` + +const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` + +const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE` + +const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES` + +const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD` + +const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR` + +const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE` + +const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE` + +const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION` + +const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE` + +const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE` + +const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE` + +const TerminationReasonCodeTemporarilyUnavailable TerminationReasonCode = `TEMPORARILY_UNAVAILABLE` + +const TerminationReasonCodeTrialExpired TerminationReasonCode = `TRIAL_EXPIRED` + +const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE` + +const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN` + +const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE` + +const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE` + +const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST` + +const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE` + +const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORKSPACE_CANCELLED_ERROR` + +const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR` + +// String representation for [fmt.Print] +func (f *TerminationReasonCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationReasonCode) Set(v string) 
error { + switch v { + case `ABUSE_DETECTED`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_SHUTDOWN`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `DATABASE_CONNECTION_FAILURE`, `DBFS_COMPONENT_UNHEALTHY`, `DOCKER_IMAGE_PULL_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `EXECUTION_COMPONENT_UNHEALTHY`, `GCP_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_UNREACHABLE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_SPARK_IMAGE`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `STORAGE_DOWNLOAD_FAILURE`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`: + *f = TerminationReasonCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_SHUTDOWN", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "DATABASE_CONNECTION_FAILURE", "DBFS_COMPONENT_UNHEALTHY", "DOCKER_IMAGE_PULL_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "EXECUTION_COMPONENT_UNHEALTHY", "GCP_QUOTA_EXCEEDED", 
"GCP_SERVICE_ACCOUNT_DELETED", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_UNREACHABLE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_SPARK_IMAGE", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "STORAGE_DOWNLOAD_FAILURE", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR"`, v) + } +} + +// Type always returns TerminationReasonCode to satisfy [pflag.Value] interface +func (f *TerminationReasonCode) Type() string { + return "TerminationReasonCode" +} + +// type of the termination +type TerminationReasonType string + +const TerminationReasonTypeClientError TerminationReasonType = `CLIENT_ERROR` + +const TerminationReasonTypeCloudFailure TerminationReasonType = `CLOUD_FAILURE` + +const TerminationReasonTypeServiceFault TerminationReasonType = `SERVICE_FAULT` + +const TerminationReasonTypeSuccess TerminationReasonType = `SUCCESS` + +// String representation for [fmt.Print] +func (f *TerminationReasonType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationReasonType) Set(v string) error { + switch v { + case `CLIENT_ERROR`, `CLOUD_FAILURE`, `SERVICE_FAULT`, `SUCCESS`: + *f = TerminationReasonType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLIENT_ERROR", "CLOUD_FAILURE", "SERVICE_FAULT", "SUCCESS"`, v) + } +} + +// Type always returns TerminationReasonType to satisfy [pflag.Value] interface +func (f *TerminationReasonType) Type() string { + return "TerminationReasonType" +} + +type UninstallLibraries struct { + // Unique identifier for the cluster on which to uninstall these libraries. + ClusterId types.String `tfsdk:"cluster_id"` + // The libraries to uninstall. + Libraries []Library `tfsdk:"libraries"` +} + +type UninstallLibrariesResponse struct { +} + +type UnpinCluster struct { + // + ClusterId types.String `tfsdk:"cluster_id"` +} + +type UnpinClusterResponse struct { +} + +type UpdateResponse struct { +} + +type VolumesStorageInfo struct { + // Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh` + Destination types.String `tfsdk:"destination"` +} + +type WorkloadType struct { + // defined what type of clients can use the cluster. E.g. Notebooks, Jobs + Clients ClientsTypes `tfsdk:"clients"` +} + +type WorkspaceStorageInfo struct { + // workspace files destination, e.g. 
+ // `/Users/user1@databricks.com/my-init.sh` + Destination types.String `tfsdk:"destination"` +} diff --git a/service/dashboards_tf/model.go b/service/dashboards_tf/model.go new file mode 100755 index 0000000000..9703eb3d5a --- /dev/null +++ b/service/dashboards_tf/model.go @@ -0,0 +1,443 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package dashboards_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type CreateDashboardRequest struct { + // The display name of the dashboard. + DisplayName types.String `tfsdk:"display_name"` + // The workspace path of the folder containing the dashboard. Includes + // leading slash and no trailing slash. + ParentPath types.String `tfsdk:"parent_path"` + // The contents of the dashboard in serialized string form. + SerializedDashboard types.String `tfsdk:"serialized_dashboard"` + // The warehouse ID used to run the dashboard. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type CreateScheduleRequest struct { + // The cron expression describing the frequency of the periodic refresh for + // this schedule. + CronSchedule CronSchedule `tfsdk:"cron_schedule"` + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // The display name for schedule. + DisplayName types.String `tfsdk:"display_name"` + // The status indicates whether this schedule is paused or not. + PauseStatus SchedulePauseStatus `tfsdk:"pause_status"` +} + +type CreateSubscriptionRequest struct { + // UUID identifying the dashboard to which the subscription belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // UUID identifying the schedule to which the subscription belongs. + ScheduleId types.String `tfsdk:"-" url:"-"` + // Subscriber details for users and destinations to be added as subscribers + // to the schedule. + Subscriber Subscriber `tfsdk:"subscriber"` +} + +type CronSchedule struct { + // A cron expression using quartz syntax. EX: `0 0 8 * * ?` represents + // everyday at 8am. See [Cron Trigger] for details. + // + // [Cron Trigger]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html + QuartzCronExpression types.String `tfsdk:"quartz_cron_expression"` + // A Java timezone id. The schedule will be resolved with respect to this + // timezone. See [Java TimeZone] for details. + // + // [Java TimeZone]: https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html + TimezoneId types.String `tfsdk:"timezone_id"` +} + +type Dashboard struct { + // The timestamp of when the dashboard was created. + CreateTime types.String `tfsdk:"create_time"` + // UUID identifying the dashboard. + DashboardId types.String `tfsdk:"dashboard_id"` + // The display name of the dashboard. + DisplayName types.String `tfsdk:"display_name"` + // The etag for the dashboard. Can be optionally provided on updates to + // ensure that the dashboard has not been modified since the last read. + Etag types.String `tfsdk:"etag"` + // The state of the dashboard resource. 
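A sketch of the dashboards request types defined above, wiring the quartz example from the CronSchedule comment into a CreateScheduleRequest; the dashboard ID and timezone are placeholders.

    package example

    import (
    	"github.com/databricks/terraform-provider-databricks/service/dashboards_tf"
    	"github.com/hashicorp/terraform-plugin-framework/types"
    )

    // dailyRefresh schedules an unpaused refresh at 08:00 in the given zone.
    func dailyRefresh(dashboardID string) dashboards_tf.CreateScheduleRequest {
    	return dashboards_tf.CreateScheduleRequest{
    		DashboardId: types.StringValue(dashboardID),
    		DisplayName: types.StringValue("Daily 8am refresh"),
    		PauseStatus: dashboards_tf.SchedulePauseStatusUnpaused,
    		CronSchedule: dashboards_tf.CronSchedule{
    			QuartzCronExpression: types.StringValue("0 0 8 * * ?"),
    			TimezoneId:           types.StringValue("Europe/Amsterdam"),
    		},
    	}
    }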
Used for tracking trashed status. + LifecycleState LifecycleState `tfsdk:"lifecycle_state"` + // The workspace path of the folder containing the dashboard. Includes + // leading slash and no trailing slash. + ParentPath types.String `tfsdk:"parent_path"` + // The workspace path of the dashboard asset, including the file name. + Path types.String `tfsdk:"path"` + // The contents of the dashboard in serialized string form. + SerializedDashboard types.String `tfsdk:"serialized_dashboard"` + // The timestamp of when the dashboard was last updated by the user. + UpdateTime types.String `tfsdk:"update_time"` + // The warehouse ID used to run the dashboard. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type DashboardView string + +const DashboardViewDashboardViewBasic DashboardView = `DASHBOARD_VIEW_BASIC` + +const DashboardViewDashboardViewFull DashboardView = `DASHBOARD_VIEW_FULL` + +// String representation for [fmt.Print] +func (f *DashboardView) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DashboardView) Set(v string) error { + switch v { + case `DASHBOARD_VIEW_BASIC`, `DASHBOARD_VIEW_FULL`: + *f = DashboardView(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DASHBOARD_VIEW_BASIC", "DASHBOARD_VIEW_FULL"`, v) + } +} + +// Type always returns DashboardView to satisfy [pflag.Value] interface +func (f *DashboardView) Type() string { + return "DashboardView" +} + +// Delete dashboard schedule +type DeleteScheduleRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // The etag for the schedule. Optionally, it can be provided to verify that + // the schedule has not been modified from its last retrieval. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` + // UUID identifying the schedule. + ScheduleId types.String `tfsdk:"-" url:"-"` +} + +type DeleteScheduleResponse struct { +} + +// Delete schedule subscription +type DeleteSubscriptionRequest struct { + // UUID identifying the dashboard which the subscription belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // The etag for the subscription. Can be optionally provided to ensure that + // the subscription has not been modified since the last read. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` + // UUID identifying the schedule which the subscription belongs. + ScheduleId types.String `tfsdk:"-" url:"-"` + // UUID identifying the subscription. + SubscriptionId types.String `tfsdk:"-" url:"-"` +} + +type DeleteSubscriptionResponse struct { +} + +// Get dashboard +type GetDashboardRequest struct { + // UUID identifying the dashboard. + DashboardId types.String `tfsdk:"-" url:"-"` +} + +// Get published dashboard +type GetPublishedDashboardRequest struct { + // UUID identifying the dashboard to be published. + DashboardId types.String `tfsdk:"-" url:"-"` +} + +// Get dashboard schedule +type GetScheduleRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // UUID identifying the schedule. + ScheduleId types.String `tfsdk:"-" url:"-"` +} + +// Get schedule subscription +type GetSubscriptionRequest struct { + // UUID identifying the dashboard which the subscription belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // UUID identifying the schedule which the subscription belongs. + ScheduleId types.String `tfsdk:"-" url:"-"` + // UUID identifying the subscription. 
+ SubscriptionId types.String `tfsdk:"-" url:"-"` +} + +type LifecycleState string + +const LifecycleStateActive LifecycleState = `ACTIVE` + +const LifecycleStateTrashed LifecycleState = `TRASHED` + +// String representation for [fmt.Print] +func (f *LifecycleState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *LifecycleState) Set(v string) error { + switch v { + case `ACTIVE`, `TRASHED`: + *f = LifecycleState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "TRASHED"`, v) + } +} + +// Type always returns LifecycleState to satisfy [pflag.Value] interface +func (f *LifecycleState) Type() string { + return "LifecycleState" +} + +// List dashboards +type ListDashboardsRequest struct { + // The number of dashboards to return per page. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // A page token, received from a previous `ListDashboards` call. This token + // can be used to retrieve the subsequent page. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // The flag to include dashboards located in the trash. If unspecified, only + // active dashboards will be returned. + ShowTrashed types.Bool `tfsdk:"-" url:"show_trashed,omitempty"` + // Indicates whether to include all metadata from the dashboard in the + // response. If unset, the response defaults to `DASHBOARD_VIEW_BASIC` which + // only includes summary metadata from the dashboard. + View DashboardView `tfsdk:"-" url:"view,omitempty"` +} + +type ListDashboardsResponse struct { + Dashboards []Dashboard `tfsdk:"dashboards"` + // A token, which can be sent as `page_token` to retrieve the next page. If + // this field is omitted, there are no subsequent dashboards. + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List dashboard schedules +type ListSchedulesRequest struct { + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // The number of schedules to return per page. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // A page token, received from a previous `ListSchedules` call. Use this to + // retrieve the subsequent page. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListSchedulesResponse struct { + // A token that can be used as a `page_token` in subsequent requests to + // retrieve the next page of results. If this field is omitted, there are no + // subsequent schedules. + NextPageToken types.String `tfsdk:"next_page_token"` + + Schedules []Schedule `tfsdk:"schedules"` +} + +// List schedule subscriptions +type ListSubscriptionsRequest struct { + // UUID identifying the dashboard to which the subscription belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // The number of subscriptions to return per page. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // A page token, received from a previous `ListSubscriptions` call. Use this + // to retrieve the subsequent page. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // UUID identifying the schedule to which the subscription belongs. + ScheduleId types.String `tfsdk:"-" url:"-"` +} + +type ListSubscriptionsResponse struct { + // A token that can be used as a `page_token` in subsequent requests to + // retrieve the next page of results. If this field is omitted, there are no + // subsequent subscriptions. 
+ NextPageToken types.String `tfsdk:"next_page_token"` + + Subscriptions []Subscription `tfsdk:"subscriptions"` +} + +type MigrateDashboardRequest struct { + // Display name for the new Lakeview dashboard. + DisplayName types.String `tfsdk:"display_name"` + // The workspace path of the folder to contain the migrated Lakeview + // dashboard. + ParentPath types.String `tfsdk:"parent_path"` + // UUID of the dashboard to be migrated. + SourceDashboardId types.String `tfsdk:"source_dashboard_id"` +} + +type PublishRequest struct { + // UUID identifying the dashboard to be published. + DashboardId types.String `tfsdk:"-" url:"-"` + // Flag to indicate if the publisher's credentials should be embedded in the + // published dashboard. These embedded credentials will be used to execute + // the published dashboard's queries. + EmbedCredentials types.Bool `tfsdk:"embed_credentials"` + // The ID of the warehouse that can be used to override the warehouse which + // was set in the draft. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type PublishedDashboard struct { + // The display name of the published dashboard. + DisplayName types.String `tfsdk:"display_name"` + // Indicates whether credentials are embedded in the published dashboard. + EmbedCredentials types.Bool `tfsdk:"embed_credentials"` + // The timestamp of when the published dashboard was last revised. + RevisionCreateTime types.String `tfsdk:"revision_create_time"` + // The warehouse ID used to run the published dashboard. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type Schedule struct { + // A timestamp indicating when the schedule was created. + CreateTime types.String `tfsdk:"create_time"` + // The cron expression describing the frequency of the periodic refresh for + // this schedule. + CronSchedule CronSchedule `tfsdk:"cron_schedule"` + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"dashboard_id"` + // The display name for schedule. + DisplayName types.String `tfsdk:"display_name"` + // The etag for the schedule. Must be left empty on create, must be provided + // on updates to ensure that the schedule has not been modified since the + // last read, and can be optionally provided on delete. + Etag types.String `tfsdk:"etag"` + // The status indicates whether this schedule is paused or not. + PauseStatus SchedulePauseStatus `tfsdk:"pause_status"` + // UUID identifying the schedule. + ScheduleId types.String `tfsdk:"schedule_id"` + // A timestamp indicating when the schedule was last updated. + UpdateTime types.String `tfsdk:"update_time"` +} + +type SchedulePauseStatus string + +const SchedulePauseStatusPaused SchedulePauseStatus = `PAUSED` + +const SchedulePauseStatusUnpaused SchedulePauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (f *SchedulePauseStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SchedulePauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *f = SchedulePauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns SchedulePauseStatus to satisfy [pflag.Value] interface +func (f *SchedulePauseStatus) Type() string { + return "SchedulePauseStatus" +} + +type Subscriber struct { + // The destination to receive the subscription email. This parameter is + // mutually exclusive with `user_subscriber`. 
+ DestinationSubscriber *SubscriptionSubscriberDestination `tfsdk:"destination_subscriber"` + // The user to receive the subscription email. This parameter is mutually + // exclusive with `destination_subscriber`. + UserSubscriber *SubscriptionSubscriberUser `tfsdk:"user_subscriber"` +} + +type Subscription struct { + // A timestamp indicating when the subscription was created. + CreateTime types.String `tfsdk:"create_time"` + // UserId of the user who adds subscribers (users or notification + // destinations) to the dashboard's schedule. + CreatedByUserId types.Int64 `tfsdk:"created_by_user_id"` + // UUID identifying the dashboard to which the subscription belongs. + DashboardId types.String `tfsdk:"dashboard_id"` + // The etag for the subscription. Must be left empty on create, can be + // optionally provided on delete to ensure that the subscription has not + // been deleted since the last read. + Etag types.String `tfsdk:"etag"` + // UUID identifying the schedule to which the subscription belongs. + ScheduleId types.String `tfsdk:"schedule_id"` + // Subscriber details for users and destinations to be added as subscribers + // to the schedule. + Subscriber Subscriber `tfsdk:"subscriber"` + // UUID identifying the subscription. + SubscriptionId types.String `tfsdk:"subscription_id"` + // A timestamp indicating when the subscription was last updated. + UpdateTime types.String `tfsdk:"update_time"` +} + +type SubscriptionSubscriberDestination struct { + // The canonical identifier of the destination to receive email + // notification. + DestinationId types.String `tfsdk:"destination_id"` +} + +type SubscriptionSubscriberUser struct { + // UserId of the subscriber. + UserId types.Int64 `tfsdk:"user_id"` +} + +// Trash dashboard +type TrashDashboardRequest struct { + // UUID identifying the dashboard. + DashboardId types.String `tfsdk:"-" url:"-"` +} + +type TrashDashboardResponse struct { +} + +// Unpublish dashboard +type UnpublishDashboardRequest struct { + // UUID identifying the dashboard to be published. + DashboardId types.String `tfsdk:"-" url:"-"` +} + +type UnpublishDashboardResponse struct { +} + +type UpdateDashboardRequest struct { + // UUID identifying the dashboard. + DashboardId types.String `tfsdk:"-" url:"-"` + // The display name of the dashboard. + DisplayName types.String `tfsdk:"display_name"` + // The etag for the dashboard. Can be optionally provided on updates to + // ensure that the dashboard has not been modified since the last read. + Etag types.String `tfsdk:"etag"` + // The contents of the dashboard in serialized string form. + SerializedDashboard types.String `tfsdk:"serialized_dashboard"` + // The warehouse ID used to run the dashboard. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type UpdateScheduleRequest struct { + // The cron expression describing the frequency of the periodic refresh for + // this schedule. + CronSchedule CronSchedule `tfsdk:"cron_schedule"` + // UUID identifying the dashboard to which the schedule belongs. + DashboardId types.String `tfsdk:"-" url:"-"` + // The display name for schedule. + DisplayName types.String `tfsdk:"display_name"` + // The etag for the schedule. Must be left empty on create, must be provided + // on updates to ensure that the schedule has not been modified since the + // last read, and can be optionally provided on delete. + Etag types.String `tfsdk:"etag"` + // The status indicates whether this schedule is paused or not. 
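Per the Subscriber comments above, exactly one of the two subscriber variants should be set; a minimal example with the user variant, leaving the destination pointer nil.

    package example

    import (
    	"github.com/databricks/terraform-provider-databricks/service/dashboards_tf"
    	"github.com/hashicorp/terraform-plugin-framework/types"
    )

    // userSubscriber subscribes a single workspace user to a schedule.
    func userSubscriber(userID int64) dashboards_tf.Subscriber {
    	return dashboards_tf.Subscriber{
    		UserSubscriber: &dashboards_tf.SubscriptionSubscriberUser{
    			UserId: types.Int64Value(userID),
    		},
    	}
    }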
+ PauseStatus SchedulePauseStatus `tfsdk:"pause_status"` + // UUID identifying the schedule. + ScheduleId types.String `tfsdk:"-" url:"-"` +} diff --git a/service/files_tf/model.go b/service/files_tf/model.go new file mode 100755 index 0000000000..69cfcfdf53 --- /dev/null +++ b/service/files_tf/model.go @@ -0,0 +1,266 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package files_tf + +import ( + "io" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AddBlock struct { + // The base64-encoded data to append to the stream. This has a limit of 1 + // MB. + Data types.String `tfsdk:"data"` + // The handle on an open stream. + Handle types.Int64 `tfsdk:"handle"` +} + +type AddBlockResponse struct { +} + +type Close struct { + // The handle on an open stream. + Handle types.Int64 `tfsdk:"handle"` +} + +type CloseResponse struct { +} + +type Create struct { + // The flag that specifies whether to overwrite existing file/files. + Overwrite types.Bool `tfsdk:"overwrite"` + // The path of the new file. The path should be the absolute DBFS path. + Path types.String `tfsdk:"path"` +} + +// Create a directory +type CreateDirectoryRequest struct { + // The absolute path of a directory. + DirectoryPath types.String `tfsdk:"-" url:"-"` +} + +type CreateDirectoryResponse struct { +} + +type CreateResponse struct { + // Handle which should subsequently be passed into the AddBlock and Close + // calls when writing to a file through a stream. + Handle types.Int64 `tfsdk:"handle"` +} + +type Delete struct { + // The path of the file or directory to delete. The path should be the + // absolute DBFS path. + Path types.String `tfsdk:"path"` + // Whether or not to recursively delete the directory's contents. Deleting + // empty directories can be done without providing the recursive flag. + Recursive types.Bool `tfsdk:"recursive"` +} + +// Delete a directory +type DeleteDirectoryRequest struct { + // The absolute path of a directory. + DirectoryPath types.String `tfsdk:"-" url:"-"` +} + +type DeleteDirectoryResponse struct { +} + +// Delete a file +type DeleteFileRequest struct { + // The absolute path of the file. + FilePath types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +type DirectoryEntry struct { + // The length of the file in bytes. This field is omitted for directories. + FileSize types.Int64 `tfsdk:"file_size"` + // True if the path is a directory. + IsDirectory types.Bool `tfsdk:"is_directory"` + // Last modification time of given file in milliseconds since unix epoch. + LastModified types.Int64 `tfsdk:"last_modified"` + // The name of the file or directory. This is the last component of the + // path. + Name types.String `tfsdk:"name"` + // The absolute path of the file or directory. + Path types.String `tfsdk:"path"` +} + +// Download a file +type DownloadRequest struct { + // The absolute path of the file. 
+ FilePath types.String `tfsdk:"-" url:"-"` +} + +type DownloadResponse struct { + ContentLength types.Int64 `tfsdk:"-" url:"-" header:"content-length,omitempty"` + + ContentType types.String `tfsdk:"-" url:"-" header:"content-type,omitempty"` + + Contents io.ReadCloser `tfsdk:"-"` + + LastModified types.String `tfsdk:"-" url:"-" header:"last-modified,omitempty"` +} + +type FileInfo struct { + // The length of the file in bytes. This field is omitted for directories. + FileSize types.Int64 `tfsdk:"file_size"` + // True if the path is a directory. + IsDir types.Bool `tfsdk:"is_dir"` + // Last modification time of given file in milliseconds since epoch. + ModificationTime types.Int64 `tfsdk:"modification_time"` + // The absolute path of the file or directory. + Path types.String `tfsdk:"path"` +} + +// Get directory metadata +type GetDirectoryMetadataRequest struct { + // The absolute path of a directory. + DirectoryPath types.String `tfsdk:"-" url:"-"` +} + +type GetDirectoryMetadataResponse struct { +} + +// Get file metadata +type GetMetadataRequest struct { + // The absolute path of the file. + FilePath types.String `tfsdk:"-" url:"-"` +} + +type GetMetadataResponse struct { + ContentLength types.Int64 `tfsdk:"-" url:"-" header:"content-length,omitempty"` + + ContentType types.String `tfsdk:"-" url:"-" header:"content-type,omitempty"` + + LastModified types.String `tfsdk:"-" url:"-" header:"last-modified,omitempty"` +} + +// Get the information of a file or directory +type GetStatusRequest struct { + // The path of the file or directory. The path should be the absolute DBFS + // path. + Path types.String `tfsdk:"-" url:"path"` +} + +// List directory contents or file details +type ListDbfsRequest struct { + // The path of the file or directory. The path should be the absolute DBFS + // path. + Path types.String `tfsdk:"-" url:"path"` +} + +// List directory contents +type ListDirectoryContentsRequest struct { + // The absolute path of a directory. + DirectoryPath types.String `tfsdk:"-" url:"-"` + // The maximum number of directory entries to return. The response may + // contain fewer entries. If the response contains a `next_page_token`, + // there may be more entries, even if fewer than `page_size` entries are in + // the response. + // + // We recommend not to set this value unless you are intentionally listing + // less than the complete directory contents. + // + // If unspecified, at most 1000 directory entries will be returned. The + // maximum value is 1000. Values above 1000 will be coerced to 1000. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // An opaque page token which was the `next_page_token` in the response of + // the previous request to list the contents of this directory. Provide this + // token to retrieve the next page of directory entries. When providing a + // `page_token`, all other parameters provided to the request must match the + // previous request. To list all of the entries in a directory, it is + // necessary to continue requesting pages of entries until the response + // contains no `next_page_token`. Note that the number of entries returned + // must not be used to determine when the listing is complete. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListDirectoryResponse struct { + // Array of DirectoryEntry. + Contents []DirectoryEntry `tfsdk:"contents"` + // A token, which can be sent as `page_token` to retrieve the next page. 
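The files_tf DownloadResponse above carries its body as an io.ReadCloser rather than a tfsdk value; a short sketch of consuming it. The response value itself would come from whatever Files API client the provider uses, not from this package.

    package example

    import (
    	"io"

    	"github.com/databricks/terraform-provider-databricks/service/files_tf"
    )

    // readDownload drains and closes the streamed file body.
    func readDownload(resp files_tf.DownloadResponse) ([]byte, error) {
    	defer resp.Contents.Close()
    	return io.ReadAll(resp.Contents)
    }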
+ NextPageToken types.String `tfsdk:"next_page_token"` +} + +type ListStatusResponse struct { + // A list of FileInfo's that describe contents of directory or file. See + // example above. + Files []FileInfo `tfsdk:"files"` +} + +type MkDirs struct { + // The path of the new directory. The path should be the absolute DBFS path. + Path types.String `tfsdk:"path"` +} + +type MkDirsResponse struct { +} + +type Move struct { + // The destination path of the file or directory. The path should be the + // absolute DBFS path. + DestinationPath types.String `tfsdk:"destination_path"` + // The source path of the file or directory. The path should be the absolute + // DBFS path. + SourcePath types.String `tfsdk:"source_path"` +} + +type MoveResponse struct { +} + +type Put struct { + // This parameter might be absent, and instead a posted file will be used. + Contents types.String `tfsdk:"contents"` + // The flag that specifies whether to overwrite existing file/files. + Overwrite types.Bool `tfsdk:"overwrite"` + // The path of the new file. The path should be the absolute DBFS path. + Path types.String `tfsdk:"path"` +} + +type PutResponse struct { +} + +// Get the contents of a file +type ReadDbfsRequest struct { + // The number of bytes to read starting from the offset. This has a limit of + // 1 MB, and a default value of 0.5 MB. + Length types.Int64 `tfsdk:"-" url:"length,omitempty"` + // The offset to read from in bytes. + Offset types.Int64 `tfsdk:"-" url:"offset,omitempty"` + // The path of the file to read. The path should be the absolute DBFS path. + Path types.String `tfsdk:"-" url:"path"` +} + +type ReadResponse struct { + // The number of bytes read (could be less than ``length`` if we hit end of + // file). This refers to number of bytes read in unencoded version (response + // data is base64-encoded). + BytesRead types.Int64 `tfsdk:"bytes_read"` + // The base64-encoded contents of the file read. + Data types.String `tfsdk:"data"` +} + +// Upload a file +type UploadRequest struct { + Contents io.ReadCloser `tfsdk:"-"` + // The absolute path of the file. + FilePath types.String `tfsdk:"-" url:"-"` + // If true, an existing file will be overwritten. + Overwrite types.Bool `tfsdk:"-" url:"overwrite,omitempty"` +} + +type UploadResponse struct { +} diff --git a/service/iam_tf/model.go b/service/iam_tf/model.go new file mode 100755 index 0000000000..8abf7d1111 --- /dev/null +++ b/service/iam_tf/model.go @@ -0,0 +1,1068 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package iam_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel PermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type AccessControlResponse struct { + // All permissions. 
+ AllPermissions []Permission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ComplexValue struct { + Display types.String `tfsdk:"display"` + + Primary types.Bool `tfsdk:"primary"` + + Ref types.String `tfsdk:"$ref"` + + Type types.String `tfsdk:"type"` + + Value types.String `tfsdk:"value"` +} + +// Delete a group +type DeleteAccountGroupRequest struct { + // Unique ID for a group in the Databricks account. + Id types.String `tfsdk:"-" url:"-"` +} + +// Delete a service principal +type DeleteAccountServicePrincipalRequest struct { + // Unique ID for a service principal in the Databricks account. + Id types.String `tfsdk:"-" url:"-"` +} + +// Delete a user +type DeleteAccountUserRequest struct { + // Unique ID for a user in the Databricks account. + Id types.String `tfsdk:"-" url:"-"` +} + +// Delete a group +type DeleteGroupRequest struct { + // Unique ID for a group in the Databricks workspace. + Id types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete a service principal +type DeleteServicePrincipalRequest struct { + // Unique ID for a service principal in the Databricks workspace. + Id types.String `tfsdk:"-" url:"-"` +} + +// Delete a user +type DeleteUserRequest struct { + // Unique ID for a user in the Databricks workspace. + Id types.String `tfsdk:"-" url:"-"` +} + +// Delete permissions assignment +type DeleteWorkspaceAssignmentRequest struct { + // The ID of the user, service principal, or group. + PrincipalId types.Int64 `tfsdk:"-" url:"-"` + // The workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type DeleteWorkspaceAssignments struct { +} + +// Get group details +type GetAccountGroupRequest struct { + // Unique ID for a group in the Databricks account. + Id types.String `tfsdk:"-" url:"-"` +} + +// Get service principal details +type GetAccountServicePrincipalRequest struct { + // Unique ID for a service principal in the Databricks account. + Id types.String `tfsdk:"-" url:"-"` +} + +// Get user details +type GetAccountUserRequest struct { + // Comma-separated list of attributes to return in response. + Attributes types.String `tfsdk:"-" url:"attributes,omitempty"` + // Desired number of results per page. Default is 10000. + Count types.Int64 `tfsdk:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes types.String `tfsdk:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Unique ID for a user in the Databricks account. + Id types.String `tfsdk:"-" url:"-"` + // Attribute to sort the results. Multi-part paths are supported. For + // example, `userName`, `name.givenName`, and `emails`. + SortBy types.String `tfsdk:"-" url:"sortBy,omitempty"` + // The order to sort the results. 
+ SortOrder GetSortOrder `tfsdk:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex types.Int64 `tfsdk:"-" url:"startIndex,omitempty"` +} + +// Get assignable roles for a resource +type GetAssignableRolesForResourceRequest struct { + // The resource name for which assignable roles will be listed. + Resource types.String `tfsdk:"-" url:"resource"` +} + +type GetAssignableRolesForResourceResponse struct { + Roles []Role `tfsdk:"roles"` +} + +// Get group details +type GetGroupRequest struct { + // Unique ID for a group in the Databricks workspace. + Id types.String `tfsdk:"-" url:"-"` +} + +type GetPasswordPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []PasswordPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get object permission levels +type GetPermissionLevelsRequest struct { + // + RequestObjectId types.String `tfsdk:"-" url:"-"` + // + RequestObjectType types.String `tfsdk:"-" url:"-"` +} + +type GetPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []PermissionsDescription `tfsdk:"permission_levels"` +} + +// Get object permissions +type GetPermissionRequest struct { + // The id of the request object. + RequestObjectId types.String `tfsdk:"-" url:"-"` + // The type of the request object. Can be one of the following: + // authorization, clusters, cluster-policies, directories, experiments, + // files, instance-pools, jobs, notebooks, pipelines, registered-models, + // repos, serving-endpoints, or warehouses. + RequestObjectType types.String `tfsdk:"-" url:"-"` +} + +// Get a rule set +type GetRuleSetRequest struct { + // Etag used for versioning. The response is at least as fresh as the eTag + // provided. Etag is used for optimistic concurrency control as a way to + // help prevent simultaneous updates of a rule set from overwriting each + // other. It is strongly suggested that systems make use of the etag in the + // read -> modify -> write pattern to perform rule set updates in order to + // avoid race conditions that is get an etag from a GET rule set request, + // and pass it with the PUT update request to identify the rule set version + // you are updating. + Etag types.String `tfsdk:"-" url:"etag"` + // The ruleset name associated with the request. + Name types.String `tfsdk:"-" url:"name"` +} + +// Get service principal details +type GetServicePrincipalRequest struct { + // Unique ID for a service principal in the Databricks workspace. + Id types.String `tfsdk:"-" url:"-"` +} + +type GetSortOrder string + +const GetSortOrderAscending GetSortOrder = `ascending` + +const GetSortOrderDescending GetSortOrder = `descending` + +// String representation for [fmt.Print] +func (f *GetSortOrder) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetSortOrder) Set(v string) error { + switch v { + case `ascending`, `descending`: + *f = GetSortOrder(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ascending", "descending"`, v) + } +} + +// Type always returns GetSortOrder to satisfy [pflag.Value] interface +func (f *GetSortOrder) Type() string { + return "GetSortOrder" +} + +// Get user details +type GetUserRequest struct { + // Comma-separated list of attributes to return in response. + Attributes types.String `tfsdk:"-" url:"attributes,omitempty"` + // Desired number of results per page. 
+ Count types.Int64 `tfsdk:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes types.String `tfsdk:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Unique ID for a user in the Databricks workspace. + Id types.String `tfsdk:"-" url:"-"` + // Attribute to sort the results. Multi-part paths are supported. For + // example, `userName`, `name.givenName`, and `emails`. + SortBy types.String `tfsdk:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder GetSortOrder `tfsdk:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex types.Int64 `tfsdk:"-" url:"startIndex,omitempty"` +} + +// List workspace permissions +type GetWorkspaceAssignmentRequest struct { + // The workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type GrantRule struct { + // Principals this grant rule applies to. + Principals []types.String `tfsdk:"principals"` + // Role that is assigned to the list of principals. + Role types.String `tfsdk:"role"` +} + +type Group struct { + // String that represents a human-readable group name + DisplayName types.String `tfsdk:"displayName"` + // Entitlements assigned to the group. See [assigning entitlements] for a + // full list of supported values. + // + // [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + Entitlements []ComplexValue `tfsdk:"entitlements"` + + ExternalId types.String `tfsdk:"externalId"` + + Groups []ComplexValue `tfsdk:"groups"` + // Databricks group ID + Id types.String `tfsdk:"id" url:"-"` + + Members []ComplexValue `tfsdk:"members"` + // Container for the group identifier. Workspace local versus account. + Meta *ResourceMeta `tfsdk:"meta"` + // Corresponds to AWS instance profile/arn role. + Roles []ComplexValue `tfsdk:"roles"` + // The schema of the group. + Schemas []GroupSchema `tfsdk:"schemas"` +} + +type GroupSchema string + +const GroupSchemaUrnIetfParamsScimSchemasCore20Group GroupSchema = `urn:ietf:params:scim:schemas:core:2.0:Group` + +// String representation for [fmt.Print] +func (f *GroupSchema) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GroupSchema) Set(v string) error { + switch v { + case `urn:ietf:params:scim:schemas:core:2.0:Group`: + *f = GroupSchema(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:schemas:core:2.0:Group"`, v) + } +} + +// Type always returns GroupSchema to satisfy [pflag.Value] interface +func (f *GroupSchema) Type() string { + return "GroupSchema" +} + +// List group details +type ListAccountGroupsRequest struct { + // Comma-separated list of attributes to return in response. + Attributes types.String `tfsdk:"-" url:"attributes,omitempty"` + // Desired number of results per page. Default is 10000. + Count types.Int64 `tfsdk:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. 
+ ExcludedAttributes types.String `tfsdk:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Attribute to sort the results. + SortBy types.String `tfsdk:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `tfsdk:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex types.Int64 `tfsdk:"-" url:"startIndex,omitempty"` +} + +// List service principals +type ListAccountServicePrincipalsRequest struct { + // Comma-separated list of attributes to return in response. + Attributes types.String `tfsdk:"-" url:"attributes,omitempty"` + // Desired number of results per page. Default is 10000. + Count types.Int64 `tfsdk:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes types.String `tfsdk:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Attribute to sort the results. + SortBy types.String `tfsdk:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `tfsdk:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex types.Int64 `tfsdk:"-" url:"startIndex,omitempty"` +} + +// List users +type ListAccountUsersRequest struct { + // Comma-separated list of attributes to return in response. + Attributes types.String `tfsdk:"-" url:"attributes,omitempty"` + // Desired number of results per page. Default is 10000. + Count types.Int64 `tfsdk:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes types.String `tfsdk:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Attribute to sort the results. Multi-part paths are supported. For + // example, `userName`, `name.givenName`, and `emails`. + SortBy types.String `tfsdk:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `tfsdk:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. 
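+	//
+	// Illustrative sketch only, not generated output: a request combining the
+	// SCIM filter syntax described above with sorting and paging. The concrete
+	// filter string and the types.StringValue / types.Int64Value constructors
+	// are assumptions made for this example.
+	//
+	//	req := ListAccountUsersRequest{
+	//		Filter:     types.StringValue(`userName co "@example.com" and active eq true`),
+	//		SortBy:     types.StringValue("userName"),
+	//		SortOrder:  ListSortOrderAscending,
+	//		Count:      types.Int64Value(100),
+	//		StartIndex: types.Int64Value(1),
+	//	}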
+ StartIndex types.Int64 `tfsdk:"-" url:"startIndex,omitempty"` +} + +// List group details +type ListGroupsRequest struct { + // Comma-separated list of attributes to return in response. + Attributes types.String `tfsdk:"-" url:"attributes,omitempty"` + // Desired number of results per page. + Count types.Int64 `tfsdk:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes types.String `tfsdk:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Attribute to sort the results. + SortBy types.String `tfsdk:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `tfsdk:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex types.Int64 `tfsdk:"-" url:"startIndex,omitempty"` +} + +type ListGroupsResponse struct { + // Total results returned in the response. + ItemsPerPage types.Int64 `tfsdk:"itemsPerPage"` + // User objects returned in the response. + Resources []Group `tfsdk:"Resources"` + // The schema of the service principal. + Schemas []ListResponseSchema `tfsdk:"schemas"` + // Starting index of all the results that matched the request filters. First + // item is number 1. + StartIndex types.Int64 `tfsdk:"startIndex"` + // Total results that match the request filters. + TotalResults types.Int64 `tfsdk:"totalResults"` +} + +type ListResponseSchema string + +const ListResponseSchemaUrnIetfParamsScimApiMessages20ListResponse ListResponseSchema = `urn:ietf:params:scim:api:messages:2.0:ListResponse` + +// String representation for [fmt.Print] +func (f *ListResponseSchema) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListResponseSchema) Set(v string) error { + switch v { + case `urn:ietf:params:scim:api:messages:2.0:ListResponse`: + *f = ListResponseSchema(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:api:messages:2.0:ListResponse"`, v) + } +} + +// Type always returns ListResponseSchema to satisfy [pflag.Value] interface +func (f *ListResponseSchema) Type() string { + return "ListResponseSchema" +} + +type ListServicePrincipalResponse struct { + // Total results returned in the response. + ItemsPerPage types.Int64 `tfsdk:"itemsPerPage"` + // User objects returned in the response. + Resources []ServicePrincipal `tfsdk:"Resources"` + // The schema of the List response. + Schemas []ListResponseSchema `tfsdk:"schemas"` + // Starting index of all the results that matched the request filters. First + // item is number 1. + StartIndex types.Int64 `tfsdk:"startIndex"` + // Total results that match the request filters. + TotalResults types.Int64 `tfsdk:"totalResults"` +} + +// List service principals +type ListServicePrincipalsRequest struct { + // Comma-separated list of attributes to return in response. + Attributes types.String `tfsdk:"-" url:"attributes,omitempty"` + // Desired number of results per page. 
+ Count types.Int64 `tfsdk:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes types.String `tfsdk:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Attribute to sort the results. + SortBy types.String `tfsdk:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `tfsdk:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex types.Int64 `tfsdk:"-" url:"startIndex,omitempty"` +} + +type ListSortOrder string + +const ListSortOrderAscending ListSortOrder = `ascending` + +const ListSortOrderDescending ListSortOrder = `descending` + +// String representation for [fmt.Print] +func (f *ListSortOrder) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListSortOrder) Set(v string) error { + switch v { + case `ascending`, `descending`: + *f = ListSortOrder(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ascending", "descending"`, v) + } +} + +// Type always returns ListSortOrder to satisfy [pflag.Value] interface +func (f *ListSortOrder) Type() string { + return "ListSortOrder" +} + +// List users +type ListUsersRequest struct { + // Comma-separated list of attributes to return in response. + Attributes types.String `tfsdk:"-" url:"attributes,omitempty"` + // Desired number of results per page. + Count types.Int64 `tfsdk:"-" url:"count,omitempty"` + // Comma-separated list of attributes to exclude in response. + ExcludedAttributes types.String `tfsdk:"-" url:"excludedAttributes,omitempty"` + // Query by which the results have to be filtered. Supported operators are + // equals(`eq`), contains(`co`), starts with(`sw`) and not equals(`ne`). + // Additionally, simple expressions can be formed using logical operators - + // `and` and `or`. The [SCIM RFC] has more details but we currently only + // support simple expressions. + // + // [SCIM RFC]: https://tools.ietf.org/html/rfc7644#section-3.4.2.2 + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Attribute to sort the results. Multi-part paths are supported. For + // example, `userName`, `name.givenName`, and `emails`. + SortBy types.String `tfsdk:"-" url:"sortBy,omitempty"` + // The order to sort the results. + SortOrder ListSortOrder `tfsdk:"-" url:"sortOrder,omitempty"` + // Specifies the index of the first result. First item is number 1. + StartIndex types.Int64 `tfsdk:"-" url:"startIndex,omitempty"` +} + +type ListUsersResponse struct { + // Total results returned in the response. + ItemsPerPage types.Int64 `tfsdk:"itemsPerPage"` + // User objects returned in the response. + Resources []User `tfsdk:"Resources"` + // The schema of the List response. + Schemas []ListResponseSchema `tfsdk:"schemas"` + // Starting index of all the results that matched the request filters. First + // item is number 1. + StartIndex types.Int64 `tfsdk:"startIndex"` + // Total results that match the request filters. 
+ TotalResults types.Int64 `tfsdk:"totalResults"` +} + +// Get permission assignments +type ListWorkspaceAssignmentRequest struct { + // The workspace ID for the account. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type Name struct { + // Family name of the Databricks user. + FamilyName types.String `tfsdk:"familyName"` + // Given name of the Databricks user. + GivenName types.String `tfsdk:"givenName"` +} + +type ObjectPermissions struct { + AccessControlList []AccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type PartialUpdate struct { + // Unique ID for a user in the Databricks workspace. + Id types.String `tfsdk:"-" url:"-"` + + Operations []Patch `tfsdk:"Operations"` + // The schema of the patch request. Must be + // ["urn:ietf:params:scim:api:messages:2.0:PatchOp"]. + Schemas []PatchSchema `tfsdk:"schemas"` +} + +type PasswordAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel PasswordPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type PasswordAccessControlResponse struct { + // All permissions. + AllPermissions []PasswordPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type PasswordPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel PasswordPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type PasswordPermissionLevel string + +const PasswordPermissionLevelCanUse PasswordPermissionLevel = `CAN_USE` + +// String representation for [fmt.Print] +func (f *PasswordPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PasswordPermissionLevel) Set(v string) error { + switch v { + case `CAN_USE`: + *f = PasswordPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_USE"`, v) + } +} + +// Type always returns PasswordPermissionLevel to satisfy [pflag.Value] interface +func (f *PasswordPermissionLevel) Type() string { + return "PasswordPermissionLevel" +} + +type PasswordPermissions struct { + AccessControlList []PasswordAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type PasswordPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel PasswordPermissionLevel `tfsdk:"permission_level"` +} + +type PasswordPermissionsRequest struct { + AccessControlList []PasswordAccessControlRequest `tfsdk:"access_control_list"` +} + +type Patch struct { + // Type of patch operation. + Op PatchOp `tfsdk:"op"` + // Selection of patch operation + Path types.String `tfsdk:"path"` + // Value to modify + Value any `tfsdk:"value"` +} + +// Type of patch operation. 
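+//
+// Illustrative sketch only, not generated output: like the other enum types in
+// this package, PatchOp exposes a Set method (defined below) that validates a
+// raw string before it is used:
+//
+//	var op PatchOp
+//	if err := op.Set("add"); err != nil {
+//		// the value was not one of "add", "remove", "replace"
+//	}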
+type PatchOp string + +const PatchOpAdd PatchOp = `add` + +const PatchOpRemove PatchOp = `remove` + +const PatchOpReplace PatchOp = `replace` + +// String representation for [fmt.Print] +func (f *PatchOp) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PatchOp) Set(v string) error { + switch v { + case `add`, `remove`, `replace`: + *f = PatchOp(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "add", "remove", "replace"`, v) + } +} + +// Type always returns PatchOp to satisfy [pflag.Value] interface +func (f *PatchOp) Type() string { + return "PatchOp" +} + +type PatchResponse struct { +} + +type PatchSchema string + +const PatchSchemaUrnIetfParamsScimApiMessages20PatchOp PatchSchema = `urn:ietf:params:scim:api:messages:2.0:PatchOp` + +// String representation for [fmt.Print] +func (f *PatchSchema) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PatchSchema) Set(v string) error { + switch v { + case `urn:ietf:params:scim:api:messages:2.0:PatchOp`: + *f = PatchSchema(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:api:messages:2.0:PatchOp"`, v) + } +} + +// Type always returns PatchSchema to satisfy [pflag.Value] interface +func (f *PatchSchema) Type() string { + return "PatchSchema" +} + +type Permission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel PermissionLevel `tfsdk:"permission_level"` +} + +type PermissionAssignment struct { + // Error response associated with a workspace permission assignment, if any. + Error types.String `tfsdk:"error"` + // The permissions level of the principal. + Permissions []WorkspacePermission `tfsdk:"permissions"` + // Information about the principal assigned to the workspace. + Principal *PrincipalOutput `tfsdk:"principal"` +} + +type PermissionAssignments struct { + // Array of permissions assignments defined for a workspace. 
+ PermissionAssignments []PermissionAssignment `tfsdk:"permission_assignments"` +} + +// Permission level +type PermissionLevel string + +const PermissionLevelCanAttachTo PermissionLevel = `CAN_ATTACH_TO` + +const PermissionLevelCanBind PermissionLevel = `CAN_BIND` + +const PermissionLevelCanEdit PermissionLevel = `CAN_EDIT` + +const PermissionLevelCanEditMetadata PermissionLevel = `CAN_EDIT_METADATA` + +const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE` + +const PermissionLevelCanManageProductionVersions PermissionLevel = `CAN_MANAGE_PRODUCTION_VERSIONS` + +const PermissionLevelCanManageRun PermissionLevel = `CAN_MANAGE_RUN` + +const PermissionLevelCanManageStagingVersions PermissionLevel = `CAN_MANAGE_STAGING_VERSIONS` + +const PermissionLevelCanQuery PermissionLevel = `CAN_QUERY` + +const PermissionLevelCanRead PermissionLevel = `CAN_READ` + +const PermissionLevelCanRestart PermissionLevel = `CAN_RESTART` + +const PermissionLevelCanRun PermissionLevel = `CAN_RUN` + +const PermissionLevelCanUse PermissionLevel = `CAN_USE` + +const PermissionLevelCanView PermissionLevel = `CAN_VIEW` + +const PermissionLevelCanViewMetadata PermissionLevel = `CAN_VIEW_METADATA` + +const PermissionLevelIsOwner PermissionLevel = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *PermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PermissionLevel) Set(v string) error { + switch v { + case `CAN_ATTACH_TO`, `CAN_BIND`, `CAN_EDIT`, `CAN_EDIT_METADATA`, `CAN_MANAGE`, `CAN_MANAGE_PRODUCTION_VERSIONS`, `CAN_MANAGE_RUN`, `CAN_MANAGE_STAGING_VERSIONS`, `CAN_QUERY`, `CAN_READ`, `CAN_RESTART`, `CAN_RUN`, `CAN_USE`, `CAN_VIEW`, `CAN_VIEW_METADATA`, `IS_OWNER`: + *f = PermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_ATTACH_TO", "CAN_BIND", "CAN_EDIT", "CAN_EDIT_METADATA", "CAN_MANAGE", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE_RUN", "CAN_MANAGE_STAGING_VERSIONS", "CAN_QUERY", "CAN_READ", "CAN_RESTART", "CAN_RUN", "CAN_USE", "CAN_VIEW", "CAN_VIEW_METADATA", "IS_OWNER"`, v) + } +} + +// Type always returns PermissionLevel to satisfy [pflag.Value] interface +func (f *PermissionLevel) Type() string { + return "PermissionLevel" +} + +type PermissionMigrationRequest struct { + // The name of the workspace group that permissions will be migrated from. + FromWorkspaceGroupName types.String `tfsdk:"from_workspace_group_name"` + // The maximum number of permissions that will be migrated. + Size types.Int64 `tfsdk:"size"` + // The name of the account group that permissions will be migrated to. + ToAccountGroupName types.String `tfsdk:"to_account_group_name"` + // WorkspaceId of the associated workspace where the permission migration + // will occur. Both workspace group and account group must be in this + // workspace. + WorkspaceId types.Int64 `tfsdk:"workspace_id"` +} + +type PermissionMigrationResponse struct { + // Number of permissions migrated. + PermissionsMigrated types.Int64 `tfsdk:"permissions_migrated"` +} + +type PermissionOutput struct { + // The results of a permissions query. 
+ Description types.String `tfsdk:"description"` + + PermissionLevel WorkspacePermission `tfsdk:"permission_level"` +} + +type PermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel PermissionLevel `tfsdk:"permission_level"` +} + +type PermissionsRequest struct { + AccessControlList []AccessControlRequest `tfsdk:"access_control_list"` + // The id of the request object. + RequestObjectId types.String `tfsdk:"-" url:"-"` + // The type of the request object. Can be one of the following: + // authorization, clusters, cluster-policies, directories, experiments, + // files, instance-pools, jobs, notebooks, pipelines, registered-models, + // repos, serving-endpoints, or warehouses. + RequestObjectType types.String `tfsdk:"-" url:"-"` +} + +type PrincipalOutput struct { + // The display name of the principal. + DisplayName types.String `tfsdk:"display_name"` + // The group name of the group. Present only if the principal is a group. + GroupName types.String `tfsdk:"group_name"` + // The unique, opaque id of the principal. + PrincipalId types.Int64 `tfsdk:"principal_id"` + // The name of the service principal. Present only if the principal is a + // service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // The username of the user. Present only if the principal is a user. + UserName types.String `tfsdk:"user_name"` +} + +type ResourceMeta struct { + // Identifier for group type. Can be local workspace group + // (`WorkspaceGroup`) or account group (`Group`). + ResourceType types.String `tfsdk:"resourceType"` +} + +type Role struct { + // Role to assign to a principal or a list of principals on a resource. + Name types.String `tfsdk:"name"` +} + +type RuleSetResponse struct { + // Identifies the version of the rule set returned. + Etag types.String `tfsdk:"etag"` + + GrantRules []GrantRule `tfsdk:"grant_rules"` + // Name of the rule set. + Name types.String `tfsdk:"name"` +} + +type RuleSetUpdateRequest struct { + // The expected etag of the rule set to update. The update will fail if the + // value does not match the value that is stored in account access control + // service. + Etag types.String `tfsdk:"etag"` + + GrantRules []GrantRule `tfsdk:"grant_rules"` + // Name of the rule set. + Name types.String `tfsdk:"name"` +} + +type ServicePrincipal struct { + // If this user is active + Active types.Bool `tfsdk:"active"` + // UUID relating to the service principal + ApplicationId types.String `tfsdk:"applicationId"` + // String that represents a concatenation of given and family names. + DisplayName types.String `tfsdk:"displayName"` + // Entitlements assigned to the service principal. See [assigning + // entitlements] for a full list of supported values. + // + // [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + Entitlements []ComplexValue `tfsdk:"entitlements"` + + ExternalId types.String `tfsdk:"externalId"` + + Groups []ComplexValue `tfsdk:"groups"` + // Databricks service principal ID. + Id types.String `tfsdk:"id"` + // Corresponds to AWS instance profile/arn role. + Roles []ComplexValue `tfsdk:"roles"` + // The schema of the List response. 
+ Schemas []ServicePrincipalSchema `tfsdk:"schemas"` +} + +type ServicePrincipalSchema string + +const ServicePrincipalSchemaUrnIetfParamsScimSchemasCore20ServicePrincipal ServicePrincipalSchema = `urn:ietf:params:scim:schemas:core:2.0:ServicePrincipal` + +// String representation for [fmt.Print] +func (f *ServicePrincipalSchema) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServicePrincipalSchema) Set(v string) error { + switch v { + case `urn:ietf:params:scim:schemas:core:2.0:ServicePrincipal`: + *f = ServicePrincipalSchema(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:schemas:core:2.0:ServicePrincipal"`, v) + } +} + +// Type always returns ServicePrincipalSchema to satisfy [pflag.Value] interface +func (f *ServicePrincipalSchema) Type() string { + return "ServicePrincipalSchema" +} + +type UpdateResponse struct { +} + +type UpdateRuleSetRequest struct { + // Name of the rule set. + Name types.String `tfsdk:"name"` + + RuleSet RuleSetUpdateRequest `tfsdk:"rule_set"` +} + +type UpdateWorkspaceAssignments struct { + // Array of permissions assignments to update on the workspace. Note that + // excluding this field will have the same effect as providing an empty list + // which will result in the deletion of all permissions for the principal. + Permissions []WorkspacePermission `tfsdk:"permissions"` + // The ID of the user, service principal, or group. + PrincipalId types.Int64 `tfsdk:"-" url:"-"` + // The workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type User struct { + // If this user is active + Active types.Bool `tfsdk:"active"` + // String that represents a concatenation of given and family names. For + // example `John Smith`. This field cannot be updated through the Workspace + // SCIM APIs when [identity federation is enabled]. Use Account SCIM APIs to + // update `displayName`. + // + // [identity federation is enabled]: https://docs.databricks.com/administration-guide/users-groups/best-practices.html#enable-identity-federation + DisplayName types.String `tfsdk:"displayName"` + // All the emails associated with the Databricks user. + Emails []ComplexValue `tfsdk:"emails"` + // Entitlements assigned to the user. See [assigning entitlements] for a + // full list of supported values. + // + // [assigning entitlements]: https://docs.databricks.com/administration-guide/users-groups/index.html#assigning-entitlements + Entitlements []ComplexValue `tfsdk:"entitlements"` + // External ID is not currently supported. It is reserved for future use. + ExternalId types.String `tfsdk:"externalId"` + + Groups []ComplexValue `tfsdk:"groups"` + // Databricks user ID. This is automatically set by Databricks. Any value + // provided by the client will be ignored. + Id types.String `tfsdk:"id"` + + Name *Name `tfsdk:"name"` + // Corresponds to AWS instance profile/arn role. + Roles []ComplexValue `tfsdk:"roles"` + // The schema of the user. + Schemas []UserSchema `tfsdk:"schemas"` + // Email address of the Databricks user. 
+ UserName types.String `tfsdk:"userName"` +} + +type UserSchema string + +const UserSchemaUrnIetfParamsScimSchemasCore20User UserSchema = `urn:ietf:params:scim:schemas:core:2.0:User` + +const UserSchemaUrnIetfParamsScimSchemasExtensionWorkspace20User UserSchema = `urn:ietf:params:scim:schemas:extension:workspace:2.0:User` + +// String representation for [fmt.Print] +func (f *UserSchema) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UserSchema) Set(v string) error { + switch v { + case `urn:ietf:params:scim:schemas:core:2.0:User`, `urn:ietf:params:scim:schemas:extension:workspace:2.0:User`: + *f = UserSchema(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "urn:ietf:params:scim:schemas:core:2.0:User", "urn:ietf:params:scim:schemas:extension:workspace:2.0:User"`, v) + } +} + +// Type always returns UserSchema to satisfy [pflag.Value] interface +func (f *UserSchema) Type() string { + return "UserSchema" +} + +type WorkspacePermission string + +const WorkspacePermissionAdmin WorkspacePermission = `ADMIN` + +const WorkspacePermissionUnknown WorkspacePermission = `UNKNOWN` + +const WorkspacePermissionUser WorkspacePermission = `USER` + +// String representation for [fmt.Print] +func (f *WorkspacePermission) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WorkspacePermission) Set(v string) error { + switch v { + case `ADMIN`, `UNKNOWN`, `USER`: + *f = WorkspacePermission(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ADMIN", "UNKNOWN", "USER"`, v) + } +} + +// Type always returns WorkspacePermission to satisfy [pflag.Value] interface +func (f *WorkspacePermission) Type() string { + return "WorkspacePermission" +} + +type WorkspacePermissions struct { + // Array of permissions defined for a workspace. + Permissions []PermissionOutput `tfsdk:"permissions"` +} diff --git a/service/jobs_tf/model.go b/service/jobs_tf/model.go new file mode 100755 index 0000000000..b48c893e2c --- /dev/null +++ b/service/jobs_tf/model.go @@ -0,0 +1,3466 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package jobs_tf + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type BaseJob struct { + // The time at which this job was created in epoch milliseconds + // (milliseconds since 1/1/1970 UTC). + CreatedTime types.Int64 `tfsdk:"created_time"` + // The creator user name. This field won’t be included in the response if + // the user has already been deleted. + CreatorUserName types.String `tfsdk:"creator_user_name"` + // The canonical identifier for this job. + JobId types.Int64 `tfsdk:"job_id"` + // Settings for this job and all of its runs. These settings can be updated + // using the `resetJob` method. 
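+	//
+	// Illustrative sketch only, not generated output: because primitive fields
+	// on these structs are tfsdk wrapper types rather than Go natives, callers
+	// unwrap them through value accessors, e.g. for a BaseJob value `job`:
+	//
+	//	id := job.JobId.ValueInt64()
+	//	creator := job.CreatorUserName.ValueString()
+	//
+	// ValueInt64 and ValueString are assumed to exist in the
+	// terraform-plugin-framework version pinned by this module.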
+ Settings *JobSettings `tfsdk:"settings"` +} + +type BaseRun struct { + // The sequence number of this run attempt for a triggered job run. The + // initial attempt of a run has an attempt_number of 0\. If the initial run + // attempt fails, and the job has a retry policy (`max_retries` \> 0), + // subsequent runs are created with an `original_attempt_run_id` of the + // original attempt’s ID and an incrementing `attempt_number`. Runs are + // retried only until they succeed, and the maximum `attempt_number` is the + // same as the `max_retries` value for the job. + AttemptNumber types.Int64 `tfsdk:"attempt_number"` + // The time in milliseconds it took to terminate the cluster and clean up + // any associated artifacts. The duration of a task run is the sum of the + // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The + // `cleanup_duration` field is set to 0 for multitask job runs. The total + // duration of a multitask job run is the value of the `run_duration` field. + CleanupDuration types.Int64 `tfsdk:"cleanup_duration"` + // The cluster used for this run. If the run is specified to use a new + // cluster, this field is set once the Jobs service has requested a cluster + // for the run. + ClusterInstance *ClusterInstance `tfsdk:"cluster_instance"` + // A snapshot of the job’s cluster specification when this run was + // created. + ClusterSpec *ClusterSpec `tfsdk:"cluster_spec"` + // The creator user name. This field won’t be included in the response if + // the user has already been deleted. + CreatorUserName types.String `tfsdk:"creator_user_name"` + // Description of the run + Description types.String `tfsdk:"description"` + // The time at which this run ended in epoch milliseconds (milliseconds + // since 1/1/1970 UTC). This field is set to 0 if the job is still running. + EndTime types.Int64 `tfsdk:"end_time"` + // The time in milliseconds it took to execute the commands in the JAR or + // notebook until they completed, failed, timed out, were cancelled, or + // encountered an unexpected error. The duration of a task run is the sum of + // the `setup_duration`, `execution_duration`, and the `cleanup_duration`. + // The `execution_duration` field is set to 0 for multitask job runs. The + // total duration of a multitask job run is the value of the `run_duration` + // field. + ExecutionDuration types.Int64 `tfsdk:"execution_duration"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. + // + // If `git_source` is set, these tasks retrieve the file from the remote + // repository by default. However, this behavior can be overridden by + // setting `source` to `WORKSPACE` on the task. + // + // Note: dbt and SQL File tasks support only version-controlled sources. If + // dbt or SQL File tasks are used, `git_source` must be defined on the job. + GitSource *GitSource `tfsdk:"git_source"` + // A list of job cluster specifications that can be shared and reused by + // tasks of this job. Libraries cannot be declared in a shared job cluster. + // You must declare dependent libraries in task settings. + JobClusters []JobCluster `tfsdk:"job_clusters"` + // The canonical identifier of the job that contains this run. + JobId types.Int64 `tfsdk:"job_id"` + // Job-level parameters used in the run + JobParameters []JobParameter `tfsdk:"job_parameters"` + // A unique identifier for this job run. 
This is set to the same value as + // `run_id`. + NumberInJob types.Int64 `tfsdk:"number_in_job"` + // If this run is a retry of a prior run attempt, this field contains the + // run_id of the original attempt; otherwise, it is the same as the run_id. + OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id"` + // The parameters used for this run. + OverridingParameters *RunParameters `tfsdk:"overriding_parameters"` + // The time in milliseconds that the run has spent in the queue. + QueueDuration types.Int64 `tfsdk:"queue_duration"` + // The repair history of the run. + RepairHistory []RepairHistoryItem `tfsdk:"repair_history"` + // The time in milliseconds it took the job run and all of its repairs to + // finish. + RunDuration types.Int64 `tfsdk:"run_duration"` + // The canonical identifier of the run. This ID is unique across all runs of + // all jobs. + RunId types.Int64 `tfsdk:"run_id"` + // An optional name for the run. The maximum length is 4096 bytes in UTF-8 + // encoding. + RunName types.String `tfsdk:"run_name"` + // The URL to the detail page of the run. + RunPageUrl types.String `tfsdk:"run_page_url"` + // The type of a run. * `JOB_RUN`: Normal job run. A run created with + // :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with + // [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with + // :method:jobs/submit. + // + // [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow + RunType RunType `tfsdk:"run_type"` + // The cron schedule that triggered this run if it was triggered by the + // periodic scheduler. + Schedule *CronSchedule `tfsdk:"schedule"` + // The time in milliseconds it took to set up the cluster. For runs that run + // on new clusters this is the cluster creation time, for runs that run on + // existing clusters this time should be very short. The duration of a task + // run is the sum of the `setup_duration`, `execution_duration`, and the + // `cleanup_duration`. The `setup_duration` field is set to 0 for multitask + // job runs. The total duration of a multitask job run is the value of the + // `run_duration` field. + SetupDuration types.Int64 `tfsdk:"setup_duration"` + // The time at which this run was started in epoch milliseconds + // (milliseconds since 1/1/1970 UTC). This may not be the time when the job + // task starts executing, for example, if the job is scheduled to run on a + // new cluster, this is the time the cluster creation call is issued. + StartTime types.Int64 `tfsdk:"start_time"` + // The current state of the run. + State *RunState `tfsdk:"state"` + // The list of tasks performed by the run. Each task has its own `run_id` + // which you can use to call `JobsGetOutput` to retrieve the run resutls. + Tasks []RunTask `tfsdk:"tasks"` + // The type of trigger that fired this run. + // + // * `PERIODIC`: Schedules that periodically trigger runs, such as a cron + // scheduler. * `ONE_TIME`: One time triggers that fire a single run. This + // occurs you triggered a single run on demand through the UI or the API. * + // `RETRY`: Indicates a run that is triggered as a retry of a previously + // failed run. This occurs when you request to re-run the job in case of + // failures. * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run + // Job task. * `FILE_ARRIVAL`: Indicates a run that is triggered by a file + // arrival. * `TABLE`: Indicates a run that is triggered by a table update. 
+ Trigger TriggerType `tfsdk:"trigger"` + // Additional details about what triggered the run + TriggerInfo *TriggerInfo `tfsdk:"trigger_info"` +} + +type CancelAllRuns struct { + // Optional boolean parameter to cancel all queued runs. If no job_id is + // provided, all queued runs in the workspace are canceled. + AllQueuedRuns types.Bool `tfsdk:"all_queued_runs"` + // The canonical identifier of the job to cancel all runs of. + JobId types.Int64 `tfsdk:"job_id"` +} + +type CancelAllRunsResponse struct { +} + +type CancelRun struct { + // This field is required. + RunId types.Int64 `tfsdk:"run_id"` +} + +type CancelRunResponse struct { +} + +type ClusterInstance struct { + // The canonical identifier for the cluster used by a run. This field is + // always available for runs on existing clusters. For runs on new clusters, + // it becomes available once the cluster is created. This value can be used + // to view logs by browsing to `/#setting/sparkui/$cluster_id/driver-logs`. + // The logs continue to be available after the run completes. + // + // The response won’t include this field if the identifier is not + // available yet. + ClusterId types.String `tfsdk:"cluster_id"` + // The canonical identifier for the Spark context used by a run. This field + // is filled in once the run begins execution. This value can be used to + // view the Spark UI by browsing to + // `/#setting/sparkui/$cluster_id/$spark_context_id`. The Spark UI continues + // to be available after the run has completed. + // + // The response won’t include this field if the identifier is not + // available yet. + SparkContextId types.String `tfsdk:"spark_context_id"` +} + +type ClusterSpec struct { + // If existing_cluster_id, the ID of an existing cluster that is used for + // all runs. When running jobs or tasks on an existing cluster, you may need + // to manually restart the cluster if it stops responding. We suggest + // running jobs and tasks on new clusters for greater reliability + ExistingClusterId types.String `tfsdk:"existing_cluster_id"` + // If job_cluster_key, this task is executed reusing the cluster specified + // in `job.settings.job_clusters`. + JobClusterKey types.String `tfsdk:"job_cluster_key"` + // An optional list of libraries to be installed on the cluster. The default + // value is an empty list. + Libraries []compute.Library `tfsdk:"libraries"` + // If new_cluster, a description of a new cluster that is created for each + // run. + NewCluster *compute.ClusterSpec `tfsdk:"new_cluster"` +} + +type Condition string + +const ConditionAllUpdated Condition = `ALL_UPDATED` + +const ConditionAnyUpdated Condition = `ANY_UPDATED` + +// String representation for [fmt.Print] +func (f *Condition) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Condition) Set(v string) error { + switch v { + case `ALL_UPDATED`, `ANY_UPDATED`: + *f = Condition(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALL_UPDATED", "ANY_UPDATED"`, v) + } +} + +// Type always returns Condition to satisfy [pflag.Value] interface +func (f *Condition) Type() string { + return "Condition" +} + +type ConditionTask struct { + // The left operand of the condition task. Can be either a string value or a + // job state or parameter reference. + Left types.String `tfsdk:"left"` + // * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their + // operands. This means that `“12.0” == “12”` will evaluate to + // `false`. 
* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, + // `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their + // operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0” + // >= “12”` will evaluate to `false`. + // + // The boolean comparison to task values can be implemented with operators + // `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it + // will be serialized to `“true”` or `“false”` for the comparison. + Op ConditionTaskOp `tfsdk:"op"` + // The right operand of the condition task. Can be either a string value or + // a job state or parameter reference. + Right types.String `tfsdk:"right"` +} + +// * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their +// operands. This means that `“12.0” == “12”` will evaluate to `false`. +// * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` +// operators perform numeric comparison of their operands. `“12.0” >= +// “12”` will evaluate to `true`, `“10.0” >= “12”` will evaluate to +// `false`. +// +// The boolean comparison to task values can be implemented with operators +// `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will +// be serialized to `“true”` or `“false”` for the comparison. +type ConditionTaskOp string + +const ConditionTaskOpEqualTo ConditionTaskOp = `EQUAL_TO` + +const ConditionTaskOpGreaterThan ConditionTaskOp = `GREATER_THAN` + +const ConditionTaskOpGreaterThanOrEqual ConditionTaskOp = `GREATER_THAN_OR_EQUAL` + +const ConditionTaskOpLessThan ConditionTaskOp = `LESS_THAN` + +const ConditionTaskOpLessThanOrEqual ConditionTaskOp = `LESS_THAN_OR_EQUAL` + +const ConditionTaskOpNotEqual ConditionTaskOp = `NOT_EQUAL` + +// String representation for [fmt.Print] +func (f *ConditionTaskOp) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ConditionTaskOp) Set(v string) error { + switch v { + case `EQUAL_TO`, `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `NOT_EQUAL`: + *f = ConditionTaskOp(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EQUAL_TO", "GREATER_THAN", "GREATER_THAN_OR_EQUAL", "LESS_THAN", "LESS_THAN_OR_EQUAL", "NOT_EQUAL"`, v) + } +} + +// Type always returns ConditionTaskOp to satisfy [pflag.Value] interface +func (f *ConditionTaskOp) Type() string { + return "ConditionTaskOp" +} + +type Continuous struct { + // Indicate whether the continuous execution of the job is paused or not. + // Defaults to UNPAUSED. + PauseStatus PauseStatus `tfsdk:"pause_status"` +} + +type CreateJob struct { + // List of permissions to set on the job. + AccessControlList []iam.AccessControlRequest `tfsdk:"access_control_list"` + // An optional continuous property for this job. The continuous property + // will ensure that there is always one run executing. Only one of + // `schedule` and `continuous` can be used. + Continuous *Continuous `tfsdk:"continuous"` + // Deployment information for jobs managed by external sources. + Deployment *JobDeployment `tfsdk:"deployment"` + // An optional description for the job. The maximum length is 1024 + // characters in UTF-8 encoding. + Description types.String `tfsdk:"description"` + // Edit mode of the job. + // + // * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. * + // `EDITABLE`: The job is in an editable state and can be modified. 
+ EditMode JobEditMode `tfsdk:"edit_mode"` + // An optional set of email addresses that is notified when runs of this job + // begin or complete as well as when this job is deleted. + EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications"` + // A list of task execution environment specifications that can be + // referenced by tasks of this job. + Environments []JobEnvironment `tfsdk:"environments"` + // Used to tell what is the format of the job. This field is ignored in + // Create/Update/Reset calls. When using the Jobs API 2.1 this value is + // always set to `"MULTI_TASK"`. + Format Format `tfsdk:"format"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. + // + // If `git_source` is set, these tasks retrieve the file from the remote + // repository by default. However, this behavior can be overridden by + // setting `source` to `WORKSPACE` on the task. + // + // Note: dbt and SQL File tasks support only version-controlled sources. If + // dbt or SQL File tasks are used, `git_source` must be defined on the job. + GitSource *GitSource `tfsdk:"git_source"` + // An optional set of health rules that can be defined for this job. + Health *JobsHealthRules `tfsdk:"health"` + // A list of job cluster specifications that can be shared and reused by + // tasks of this job. Libraries cannot be declared in a shared job cluster. + // You must declare dependent libraries in task settings. + JobClusters []JobCluster `tfsdk:"job_clusters"` + // An optional maximum allowed number of concurrent runs of the job. Set + // this value if you want to be able to execute multiple runs of the same + // job concurrently. This is useful for example if you trigger your job on a + // frequent schedule and want to allow consecutive runs to overlap with each + // other, or if you want to trigger multiple runs which differ by their + // input parameters. This setting affects only new runs. For example, + // suppose the job’s concurrency is 4 and there are 4 concurrent active + // runs. Then setting the concurrency to 3 won’t kill any of the active + // runs. However, from then on, new runs are skipped unless there are fewer + // than 3 active runs. This value cannot exceed 1000. Setting this value to + // `0` causes all new runs to be skipped. + MaxConcurrentRuns types.Int64 `tfsdk:"max_concurrent_runs"` + // An optional name for the job. The maximum length is 4096 bytes in UTF-8 + // encoding. + Name types.String `tfsdk:"name"` + // Optional notification settings that are used when sending notifications + // to each of the `email_notifications` and `webhook_notifications` for this + // job. + NotificationSettings *JobNotificationSettings `tfsdk:"notification_settings"` + // Job-level parameter definitions + Parameters []JobParameterDefinition `tfsdk:"parameters"` + // The queue settings of the job. + Queue *QueueSettings `tfsdk:"queue"` + // Write-only setting, available only in Create/Update/Reset and Submit + // calls. Specifies the user or service principal that the job runs as. If + // not specified, the job runs as the user who created the job. + // + // Only `user_name` or `service_principal_name` can be specified. If both + // are specified, an error is thrown. + RunAs *JobRunAs `tfsdk:"run_as"` + // An optional periodic schedule for this job. 
The default behavior is that + // the job only runs when triggered by clicking “Run Now” in the Jobs UI + // or sending an API request to `runNow`. + Schedule *CronSchedule `tfsdk:"schedule"` + // A map of tags associated with the job. These are forwarded to the cluster + // as cluster tags for jobs clusters, and are subject to the same + // limitations as cluster tags. A maximum of 25 tags can be added to the + // job. + Tags map[string]types.String `tfsdk:"tags"` + // A list of task specifications to be executed by this job. + Tasks []Task `tfsdk:"tasks"` + // An optional timeout applied to each run of this job. A value of `0` means + // no timeout. + TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds"` + // A configuration to trigger a run when certain conditions are met. The + // default behavior is that the job runs only when triggered by clicking + // “Run Now” in the Jobs UI or sending an API request to `runNow`. + Trigger *TriggerSettings `tfsdk:"trigger"` + // A collection of system notification IDs to notify when runs of this job + // begin or complete. + WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications"` +} + +// Job was created successfully +type CreateResponse struct { + // The canonical identifier for the newly created job. + JobId types.Int64 `tfsdk:"job_id"` +} + +type CronSchedule struct { + // Indicate whether this schedule is paused or not. + PauseStatus PauseStatus `tfsdk:"pause_status"` + // A Cron expression using Quartz syntax that describes the schedule for a + // job. See [Cron Trigger] for details. This field is required. + // + // [Cron Trigger]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html + QuartzCronExpression types.String `tfsdk:"quartz_cron_expression"` + // A Java timezone ID. The schedule for a job is resolved with respect to + // this timezone. See [Java TimeZone] for details. This field is required. + // + // [Java TimeZone]: https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html + TimezoneId types.String `tfsdk:"timezone_id"` +} + +type DbtOutput struct { + // An optional map of headers to send when retrieving the artifact from the + // `artifacts_link`. + ArtifactsHeaders map[string]types.String `tfsdk:"artifacts_headers"` + // A pre-signed URL to download the (compressed) dbt artifacts. This link is + // valid for a limited time (30 minutes). This information is only available + // after the run has finished. + ArtifactsLink types.String `tfsdk:"artifacts_link"` +} + +type DbtTask struct { + // Optional name of the catalog to use. The value is the top level in the + // 3-level namespace of Unity Catalog (catalog / schema / relation). The + // catalog value can only be specified if a warehouse_id is specified. + // Requires dbt-databricks >= 1.1.1. + Catalog types.String `tfsdk:"catalog"` + // A list of dbt commands to execute. All commands must start with `dbt`. + // This parameter must not be empty. A maximum of up to 10 commands can be + // provided. + Commands []types.String `tfsdk:"commands"` + // Optional (relative) path to the profiles directory. Can only be specified + // if no warehouse_id is specified. If no warehouse_id is specified and this + // folder is unset, the root directory is used. + ProfilesDirectory types.String `tfsdk:"profiles_directory"` + // Path to the project directory. Optional for Git sourced tasks, in which + // case if no value is provided, the root of the Git repository is used. 
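+	//
+	// Illustrative sketch (not generated code): a DbtTask that runs against a SQL
+	// warehouse; the warehouse ID and schema are placeholders, and every command
+	// starts with `dbt` as required above.
+	//
+	//    dbt := DbtTask{
+	//        Commands: []types.String{
+	//            types.StringValue("dbt deps"),
+	//            types.StringValue("dbt run"),
+	//        },
+	//        Schema:      types.StringValue("analytics"),
+	//        WarehouseId: types.StringValue("1234567890abcdef"),
+	//    }
+	//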
+ ProjectDirectory types.String `tfsdk:"project_directory"` + // Optional schema to write to. This parameter is only used when a + // warehouse_id is also provided. If not provided, the `default` schema is + // used. + Schema types.String `tfsdk:"schema"` + // Optional location type of the project directory. When set to `WORKSPACE`, + // the project will be retrieved from the local Databricks workspace. When + // set to `GIT`, the project will be retrieved from a Git repository defined + // in `git_source`. If the value is empty, the task will use `GIT` if + // `git_source` is defined and `WORKSPACE` otherwise. + // + // * `WORKSPACE`: Project is located in Databricks workspace. * `GIT`: + // Project is located in cloud Git provider. + Source Source `tfsdk:"source"` + // ID of the SQL warehouse to connect to. If provided, we automatically + // generate and provide the profile and connection details to dbt. It can be + // overridden on a per-command basis by using the `--profiles-dir` command + // line argument. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type DeleteJob struct { + // The canonical identifier of the job to delete. This field is required. + JobId types.Int64 `tfsdk:"job_id"` +} + +type DeleteResponse struct { +} + +type DeleteRun struct { + // ID of the run to delete. + RunId types.Int64 `tfsdk:"run_id"` +} + +type DeleteRunResponse struct { +} + +// Run was exported successfully. +type ExportRunOutput struct { + // The exported content in HTML format (one for every view item). To extract + // the HTML notebook from the JSON response, download and run this [Python + // script]. + // + // [Python script]: https://docs.databricks.com/en/_static/examples/extract.py + Views []ViewItem `tfsdk:"views"` +} + +// Export and retrieve a job run +type ExportRunRequest struct { + // The canonical identifier for the run. This field is required. + RunId types.Int64 `tfsdk:"-" url:"run_id"` + // Which views to export (CODE, DASHBOARDS, or ALL). Defaults to CODE. + ViewsToExport ViewsToExport `tfsdk:"-" url:"views_to_export,omitempty"` +} + +type FileArrivalTriggerConfiguration struct { + // If set, the trigger starts a run only after the specified amount of time + // passed since the last time the trigger fired. The minimum allowed value + // is 60 seconds + MinTimeBetweenTriggersSeconds types.Int64 `tfsdk:"min_time_between_triggers_seconds"` + // URL to be monitored for file arrivals. The path must point to the root or + // a subpath of the external location. + Url types.String `tfsdk:"url"` + // If set, the trigger starts a run only after no file activity has occurred + // for the specified amount of time. This makes it possible to wait for a + // batch of incoming files to arrive before triggering a run. The minimum + // allowed value is 60 seconds. + WaitAfterLastChangeSeconds types.Int64 `tfsdk:"wait_after_last_change_seconds"` +} + +type ForEachStats struct { + // Sample of 3 most common error messages occurred during the iteration. + ErrorMessageStats []ForEachTaskErrorMessageStats `tfsdk:"error_message_stats"` + // Describes stats of the iteration. Only latest retries are considered. + TaskRunStats *ForEachTaskTaskRunStats `tfsdk:"task_run_stats"` +} + +type ForEachTask struct { + // Controls the number of active iterations task runs. Default is 20, + // maximum allowed is 100. + Concurrency types.Int64 `tfsdk:"concurrency"` + // Array for task to iterate on. This can be a JSON string or a reference to + // an array parameter. 
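+	//
+	// Illustrative sketch (not generated code): iterating over a literal JSON
+	// array with at most five concurrent iterations; the inner Task literal is
+	// elided because its fields are defined elsewhere in this file.
+	//
+	//    loop := ForEachTask{
+	//        Concurrency: types.Int64Value(5),
+	//        Inputs:      types.StringValue(`["a", "b", "c"]`),
+	//        Task:        Task{ /* task to run once per element */ },
+	//    }
+	//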
+ Inputs types.String `tfsdk:"inputs"` + // Configuration for the task that will be run for each element in the array + Task Task `tfsdk:"task"` +} + +type ForEachTaskErrorMessageStats struct { + // Describes the count of such error message encountered during the + // iterations. + Count types.Int64 `tfsdk:"count"` + // Describes the error message occured during the iterations. + ErrorMessage types.String `tfsdk:"error_message"` + // Describes the termination reason for the error message. + TerminationCategory types.String `tfsdk:"termination_category"` +} + +type ForEachTaskTaskRunStats struct { + // Describes the iteration runs having an active lifecycle state or an + // active run sub state. + ActiveIterations types.Int64 `tfsdk:"active_iterations"` + // Describes the number of failed and succeeded iteration runs. + CompletedIterations types.Int64 `tfsdk:"completed_iterations"` + // Describes the number of failed iteration runs. + FailedIterations types.Int64 `tfsdk:"failed_iterations"` + // Describes the number of iteration runs that have been scheduled. + ScheduledIterations types.Int64 `tfsdk:"scheduled_iterations"` + // Describes the number of succeeded iteration runs. + SucceededIterations types.Int64 `tfsdk:"succeeded_iterations"` + // Describes the length of the list of items to iterate over. + TotalIterations types.Int64 `tfsdk:"total_iterations"` +} + +type Format string + +const FormatMultiTask Format = `MULTI_TASK` + +const FormatSingleTask Format = `SINGLE_TASK` + +// String representation for [fmt.Print] +func (f *Format) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Format) Set(v string) error { + switch v { + case `MULTI_TASK`, `SINGLE_TASK`: + *f = Format(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MULTI_TASK", "SINGLE_TASK"`, v) + } +} + +// Type always returns Format to satisfy [pflag.Value] interface +func (f *Format) Type() string { + return "Format" +} + +// Get job permission levels +type GetJobPermissionLevelsRequest struct { + // The job for which to get or manage permissions. + JobId types.String `tfsdk:"-" url:"-"` +} + +type GetJobPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []JobPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get job permissions +type GetJobPermissionsRequest struct { + // The job for which to get or manage permissions. + JobId types.String `tfsdk:"-" url:"-"` +} + +// Get a single job +type GetJobRequest struct { + // The canonical identifier of the job to retrieve information about. This + // field is required. + JobId types.Int64 `tfsdk:"-" url:"job_id"` +} + +// Get the output for a single run +type GetRunOutputRequest struct { + // The canonical identifier for the run. + RunId types.Int64 `tfsdk:"-" url:"run_id"` +} + +// Get a single job run +type GetRunRequest struct { + // Whether to include the repair history in the response. + IncludeHistory types.Bool `tfsdk:"-" url:"include_history,omitempty"` + // Whether to include resolved parameter values in the response. + IncludeResolvedValues types.Bool `tfsdk:"-" url:"include_resolved_values,omitempty"` + // The canonical identifier of the run for which to retrieve the metadata. + // This field is required. 
+ RunId types.Int64 `tfsdk:"-" url:"run_id"` +} + +type GitProvider string + +const GitProviderAwsCodeCommit GitProvider = `awsCodeCommit` + +const GitProviderAzureDevOpsServices GitProvider = `azureDevOpsServices` + +const GitProviderBitbucketCloud GitProvider = `bitbucketCloud` + +const GitProviderBitbucketServer GitProvider = `bitbucketServer` + +const GitProviderGitHub GitProvider = `gitHub` + +const GitProviderGitHubEnterprise GitProvider = `gitHubEnterprise` + +const GitProviderGitLab GitProvider = `gitLab` + +const GitProviderGitLabEnterpriseEdition GitProvider = `gitLabEnterpriseEdition` + +// String representation for [fmt.Print] +func (f *GitProvider) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GitProvider) Set(v string) error { + switch v { + case `awsCodeCommit`, `azureDevOpsServices`, `bitbucketCloud`, `bitbucketServer`, `gitHub`, `gitHubEnterprise`, `gitLab`, `gitLabEnterpriseEdition`: + *f = GitProvider(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "awsCodeCommit", "azureDevOpsServices", "bitbucketCloud", "bitbucketServer", "gitHub", "gitHubEnterprise", "gitLab", "gitLabEnterpriseEdition"`, v) + } +} + +// Type always returns GitProvider to satisfy [pflag.Value] interface +func (f *GitProvider) Type() string { + return "GitProvider" +} + +// Read-only state of the remote repository at the time the job was run. This +// field is only included on job runs. +type GitSnapshot struct { + // Commit that was used to execute the run. If git_branch was specified, + // this points to the HEAD of the branch at the time of the run; if git_tag + // was specified, this points to the commit the tag points to. + UsedCommit types.String `tfsdk:"used_commit"` +} + +// An optional specification for a remote Git repository containing the source +// code used by tasks. Version-controlled source code is supported by notebook, +// dbt, Python script, and SQL File tasks. +// +// If `git_source` is set, these tasks retrieve the file from the remote +// repository by default. However, this behavior can be overridden by setting +// `source` to `WORKSPACE` on the task. +// +// Note: dbt and SQL File tasks support only version-controlled sources. If dbt +// or SQL File tasks are used, `git_source` must be defined on the job. +type GitSource struct { + // Name of the branch to be checked out and used by this job. This field + // cannot be specified in conjunction with git_tag or git_commit. + GitBranch types.String `tfsdk:"git_branch"` + // Commit to be checked out and used by this job. This field cannot be + // specified in conjunction with git_branch or git_tag. + GitCommit types.String `tfsdk:"git_commit"` + // Unique identifier of the service used to host the Git repository. The + // value is case insensitive. + GitProvider GitProvider `tfsdk:"git_provider"` + // Read-only state of the remote repository at the time the job was run. + // This field is only included on job runs. + GitSnapshot *GitSnapshot `tfsdk:"git_snapshot"` + // Name of the tag to be checked out and used by this job. This field cannot + // be specified in conjunction with git_branch or git_commit. + GitTag types.String `tfsdk:"git_tag"` + // URL of the repository to be cloned by this job. + GitUrl types.String `tfsdk:"git_url"` + // The source of the job specification in the remote repository when the job + // is source controlled. + JobSource *JobSource `tfsdk:"job_source"` +} + +// Job was retrieved successfully. 
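+//
+// Illustrative sketch (not generated code): the `tfsdk` struct tags on these
+// types are what terraform-plugin-framework uses when copying plan and state
+// data. Assuming the usual resource.ReadRequest/ReadResponse signature, a
+// resource Read method could populate this struct roughly as follows:
+//
+//    var job Job
+//    resp.Diagnostics.Append(req.State.Get(ctx, &job)...)
+//    if resp.Diagnostics.HasError() {
+//        return
+//    }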
+type Job struct { + // The time at which this job was created in epoch milliseconds + // (milliseconds since 1/1/1970 UTC). + CreatedTime types.Int64 `tfsdk:"created_time"` + // The creator user name. This field won’t be included in the response if + // the user has already been deleted. + CreatorUserName types.String `tfsdk:"creator_user_name"` + // The canonical identifier for this job. + JobId types.Int64 `tfsdk:"job_id"` + // The email of an active workspace user or the application ID of a service + // principal that the job runs as. This value can be changed by setting the + // `run_as` field when creating or updating a job. + // + // By default, `run_as_user_name` is based on the current job settings and + // is set to the creator of the job if job access control is disabled or to + // the user with the `is_owner` permission if job access control is enabled. + RunAsUserName types.String `tfsdk:"run_as_user_name"` + // Settings for this job and all of its runs. These settings can be updated + // using the `resetJob` method. + Settings *JobSettings `tfsdk:"settings"` +} + +type JobAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel JobPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type JobAccessControlResponse struct { + // All permissions. + AllPermissions []JobPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type JobCluster struct { + // A unique name for the job cluster. This field is required and must be + // unique within the job. `JobTaskSettings` may refer to this field to + // determine which cluster to launch for the task execution. + JobClusterKey types.String `tfsdk:"job_cluster_key"` + // If new_cluster, a description of a cluster that is created for each task. + NewCluster compute.ClusterSpec `tfsdk:"new_cluster"` +} + +type JobDeployment struct { + // The kind of deployment that manages the job. + // + // * `BUNDLE`: The job is managed by Databricks Asset Bundle. + Kind JobDeploymentKind `tfsdk:"kind"` + // Path of the file that contains deployment metadata. + MetadataFilePath types.String `tfsdk:"metadata_file_path"` +} + +// * `BUNDLE`: The job is managed by Databricks Asset Bundle. +type JobDeploymentKind string + +// The job is managed by Databricks Asset Bundle. +const JobDeploymentKindBundle JobDeploymentKind = `BUNDLE` + +// String representation for [fmt.Print] +func (f *JobDeploymentKind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobDeploymentKind) Set(v string) error { + switch v { + case `BUNDLE`: + *f = JobDeploymentKind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BUNDLE"`, v) + } +} + +// Type always returns JobDeploymentKind to satisfy [pflag.Value] interface +func (f *JobDeploymentKind) Type() string { + return "JobDeploymentKind" +} + +// Edit mode of the job. +// +// * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. 
* +// `EDITABLE`: The job is in an editable state and can be modified. +type JobEditMode string + +// The job is in an editable state and can be modified. +const JobEditModeEditable JobEditMode = `EDITABLE` + +// The job is in a locked UI state and cannot be modified. +const JobEditModeUiLocked JobEditMode = `UI_LOCKED` + +// String representation for [fmt.Print] +func (f *JobEditMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobEditMode) Set(v string) error { + switch v { + case `EDITABLE`, `UI_LOCKED`: + *f = JobEditMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EDITABLE", "UI_LOCKED"`, v) + } +} + +// Type always returns JobEditMode to satisfy [pflag.Value] interface +func (f *JobEditMode) Type() string { + return "JobEditMode" +} + +type JobEmailNotifications struct { + // If true, do not send email to recipients specified in `on_failure` if the + // run is skipped. + NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs"` + // A list of email addresses to be notified when the duration of a run + // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in + // the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is + // specified in the `health` field for the job, notifications are not sent. + OnDurationWarningThresholdExceeded []types.String `tfsdk:"on_duration_warning_threshold_exceeded"` + // A list of email addresses to be notified when a run unsuccessfully + // completes. A run is considered to have completed unsuccessfully if it + // ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or + // `TIMED_OUT` result_state. If this is not specified on job creation, + // reset, or update the list is empty, and notifications are not sent. + OnFailure []types.String `tfsdk:"on_failure"` + // A list of email addresses to be notified when a run begins. If not + // specified on job creation, reset, or update, the list is empty, and + // notifications are not sent. + OnStart []types.String `tfsdk:"on_start"` + // A list of email addresses to notify when any streaming backlog thresholds + // are exceeded for any stream. Streaming backlog thresholds can be set in + // the `health` field using the following metrics: + // `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, + // `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is + // based on the 10-minute average of these metrics. If the issue persists, + // notifications are resent every 30 minutes. + OnStreamingBacklogExceeded []types.String `tfsdk:"on_streaming_backlog_exceeded"` + // A list of email addresses to be notified when a run successfully + // completes. A run is considered to have completed successfully if it ends + // with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If + // not specified on job creation, reset, or update, the list is empty, and + // notifications are not sent. + OnSuccess []types.String `tfsdk:"on_success"` +} + +type JobEnvironment struct { + // The key of an environment. It has to be unique within a job. + EnvironmentKey types.String `tfsdk:"environment_key"` + // The environment entity used to preserve serverless environment side panel + // and jobs' environment for non-notebook task. In this minimal environment + // spec, only pip dependencies are supported. 
+ Spec *compute.Environment `tfsdk:"spec"` +} + +type JobNotificationSettings struct { + // If true, do not send notifications to recipients specified in + // `on_failure` if the run is canceled. + NoAlertForCanceledRuns types.Bool `tfsdk:"no_alert_for_canceled_runs"` + // If true, do not send notifications to recipients specified in + // `on_failure` if the run is skipped. + NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs"` +} + +type JobParameter struct { + // The optional default value of the parameter + Default types.String `tfsdk:"default"` + // The name of the parameter + Name types.String `tfsdk:"name"` + // The value used in the run + Value types.String `tfsdk:"value"` +} + +type JobParameterDefinition struct { + // Default value of the parameter. + Default types.String `tfsdk:"default"` + // The name of the defined parameter. May only contain alphanumeric + // characters, `_`, `-`, and `.` + Name types.String `tfsdk:"name"` +} + +type JobPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel JobPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type JobPermissionLevel string + +const JobPermissionLevelCanManage JobPermissionLevel = `CAN_MANAGE` + +const JobPermissionLevelCanManageRun JobPermissionLevel = `CAN_MANAGE_RUN` + +const JobPermissionLevelCanView JobPermissionLevel = `CAN_VIEW` + +const JobPermissionLevelIsOwner JobPermissionLevel = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *JobPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobPermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_MANAGE_RUN`, `CAN_VIEW`, `IS_OWNER`: + *f = JobPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_MANAGE_RUN", "CAN_VIEW", "IS_OWNER"`, v) + } +} + +// Type always returns JobPermissionLevel to satisfy [pflag.Value] interface +func (f *JobPermissionLevel) Type() string { + return "JobPermissionLevel" +} + +type JobPermissions struct { + AccessControlList []JobAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type JobPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel JobPermissionLevel `tfsdk:"permission_level"` +} + +type JobPermissionsRequest struct { + AccessControlList []JobAccessControlRequest `tfsdk:"access_control_list"` + // The job for which to get or manage permissions. + JobId types.String `tfsdk:"-" url:"-"` +} + +// Write-only setting, available only in Create/Update/Reset and Submit calls. +// Specifies the user or service principal that the job runs as. If not +// specified, the job runs as the user who created the job. +// +// Only `user_name` or `service_principal_name` can be specified. If both are +// specified, an error is thrown. +type JobRunAs struct { + // Application ID of an active service principal. Setting this field + // requires the `servicePrincipal/user` role. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // The email of an active workspace user. Non-admin users can only set this + // field to their own email. 
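+	//
+	// Illustrative sketch (not generated code): exactly one of the two fields is
+	// set, per the constraint above; the address is a placeholder.
+	//
+	//    runAs := &JobRunAs{
+	//        UserName: types.StringValue("someone@example.com"),
+	//    }
+	//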
+ UserName types.String `tfsdk:"user_name"` +} + +type JobSettings struct { + // An optional continuous property for this job. The continuous property + // will ensure that there is always one run executing. Only one of + // `schedule` and `continuous` can be used. + Continuous *Continuous `tfsdk:"continuous"` + // Deployment information for jobs managed by external sources. + Deployment *JobDeployment `tfsdk:"deployment"` + // An optional description for the job. The maximum length is 1024 + // characters in UTF-8 encoding. + Description types.String `tfsdk:"description"` + // Edit mode of the job. + // + // * `UI_LOCKED`: The job is in a locked UI state and cannot be modified. * + // `EDITABLE`: The job is in an editable state and can be modified. + EditMode JobEditMode `tfsdk:"edit_mode"` + // An optional set of email addresses that is notified when runs of this job + // begin or complete as well as when this job is deleted. + EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications"` + // A list of task execution environment specifications that can be + // referenced by tasks of this job. + Environments []JobEnvironment `tfsdk:"environments"` + // Used to tell what is the format of the job. This field is ignored in + // Create/Update/Reset calls. When using the Jobs API 2.1 this value is + // always set to `"MULTI_TASK"`. + Format Format `tfsdk:"format"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. + // + // If `git_source` is set, these tasks retrieve the file from the remote + // repository by default. However, this behavior can be overridden by + // setting `source` to `WORKSPACE` on the task. + // + // Note: dbt and SQL File tasks support only version-controlled sources. If + // dbt or SQL File tasks are used, `git_source` must be defined on the job. + GitSource *GitSource `tfsdk:"git_source"` + // An optional set of health rules that can be defined for this job. + Health *JobsHealthRules `tfsdk:"health"` + // A list of job cluster specifications that can be shared and reused by + // tasks of this job. Libraries cannot be declared in a shared job cluster. + // You must declare dependent libraries in task settings. + JobClusters []JobCluster `tfsdk:"job_clusters"` + // An optional maximum allowed number of concurrent runs of the job. Set + // this value if you want to be able to execute multiple runs of the same + // job concurrently. This is useful for example if you trigger your job on a + // frequent schedule and want to allow consecutive runs to overlap with each + // other, or if you want to trigger multiple runs which differ by their + // input parameters. This setting affects only new runs. For example, + // suppose the job’s concurrency is 4 and there are 4 concurrent active + // runs. Then setting the concurrency to 3 won’t kill any of the active + // runs. However, from then on, new runs are skipped unless there are fewer + // than 3 active runs. This value cannot exceed 1000. Setting this value to + // `0` causes all new runs to be skipped. + MaxConcurrentRuns types.Int64 `tfsdk:"max_concurrent_runs"` + // An optional name for the job. The maximum length is 4096 bytes in UTF-8 + // encoding. + Name types.String `tfsdk:"name"` + // Optional notification settings that are used when sending notifications + // to each of the `email_notifications` and `webhook_notifications` for this + // job. 
+ NotificationSettings *JobNotificationSettings `tfsdk:"notification_settings"` + // Job-level parameter definitions + Parameters []JobParameterDefinition `tfsdk:"parameters"` + // The queue settings of the job. + Queue *QueueSettings `tfsdk:"queue"` + // Write-only setting, available only in Create/Update/Reset and Submit + // calls. Specifies the user or service principal that the job runs as. If + // not specified, the job runs as the user who created the job. + // + // Only `user_name` or `service_principal_name` can be specified. If both + // are specified, an error is thrown. + RunAs *JobRunAs `tfsdk:"run_as"` + // An optional periodic schedule for this job. The default behavior is that + // the job only runs when triggered by clicking “Run Now” in the Jobs UI + // or sending an API request to `runNow`. + Schedule *CronSchedule `tfsdk:"schedule"` + // A map of tags associated with the job. These are forwarded to the cluster + // as cluster tags for jobs clusters, and are subject to the same + // limitations as cluster tags. A maximum of 25 tags can be added to the + // job. + Tags map[string]types.String `tfsdk:"tags"` + // A list of task specifications to be executed by this job. + Tasks []Task `tfsdk:"tasks"` + // An optional timeout applied to each run of this job. A value of `0` means + // no timeout. + TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds"` + // A configuration to trigger a run when certain conditions are met. The + // default behavior is that the job runs only when triggered by clicking + // “Run Now” in the Jobs UI or sending an API request to `runNow`. + Trigger *TriggerSettings `tfsdk:"trigger"` + // A collection of system notification IDs to notify when runs of this job + // begin or complete. + WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications"` +} + +// The source of the job specification in the remote repository when the job is +// source controlled. +type JobSource struct { + // Dirty state indicates the job is not fully synced with the job + // specification in the remote repository. + // + // Possible values are: * `NOT_SYNCED`: The job is not yet synced with the + // remote job specification. Import the remote job specification from UI to + // make the job fully synced. * `DISCONNECTED`: The job is temporary + // disconnected from the remote job specification and is allowed for live + // edit. Import the remote job specification again from UI to make the job + // fully synced. + DirtyState JobSourceDirtyState `tfsdk:"dirty_state"` + // Name of the branch which the job is imported from. + ImportFromGitBranch types.String `tfsdk:"import_from_git_branch"` + // Path of the job YAML file that contains the job specification. + JobConfigPath types.String `tfsdk:"job_config_path"` +} + +// Dirty state indicates the job is not fully synced with the job specification +// in the remote repository. +// +// Possible values are: * `NOT_SYNCED`: The job is not yet synced with the +// remote job specification. Import the remote job specification from UI to make +// the job fully synced. * `DISCONNECTED`: The job is temporary disconnected +// from the remote job specification and is allowed for live edit. Import the +// remote job specification again from UI to make the job fully synced. +type JobSourceDirtyState string + +// The job is temporary disconnected from the remote job specification and is +// allowed for live edit. Import the remote job specification again from UI to +// make the job fully synced. 
+const JobSourceDirtyStateDisconnected JobSourceDirtyState = `DISCONNECTED` + +// The job is not yet synced with the remote job specification. Import the +// remote job specification from UI to make the job fully synced. +const JobSourceDirtyStateNotSynced JobSourceDirtyState = `NOT_SYNCED` + +// String representation for [fmt.Print] +func (f *JobSourceDirtyState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobSourceDirtyState) Set(v string) error { + switch v { + case `DISCONNECTED`, `NOT_SYNCED`: + *f = JobSourceDirtyState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "NOT_SYNCED"`, v) + } +} + +// Type always returns JobSourceDirtyState to satisfy [pflag.Value] interface +func (f *JobSourceDirtyState) Type() string { + return "JobSourceDirtyState" +} + +// Specifies the health metric that is being evaluated for a particular health +// rule. +// +// * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * +// `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data waiting +// to be consumed across all streams. This metric is in Private Preview. * +// `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset lag across all +// streams. This metric is in Private Preview. * `STREAMING_BACKLOG_SECONDS`: An +// estimate of the maximum consumer delay across all streams. This metric is in +// Private Preview. * `STREAMING_BACKLOG_FILES`: An estimate of the maximum +// number of outstanding files across all streams. This metric is in Private +// Preview. +type JobsHealthMetric string + +// Expected total time for a run in seconds. +const JobsHealthMetricRunDurationSeconds JobsHealthMetric = `RUN_DURATION_SECONDS` + +// An estimate of the maximum bytes of data waiting to be consumed across all +// streams. This metric is in Private Preview. +const JobsHealthMetricStreamingBacklogBytes JobsHealthMetric = `STREAMING_BACKLOG_BYTES` + +// An estimate of the maximum number of outstanding files across all streams. +// This metric is in Private Preview. +const JobsHealthMetricStreamingBacklogFiles JobsHealthMetric = `STREAMING_BACKLOG_FILES` + +// An estimate of the maximum offset lag across all streams. This metric is in +// Private Preview. +const JobsHealthMetricStreamingBacklogRecords JobsHealthMetric = `STREAMING_BACKLOG_RECORDS` + +// An estimate of the maximum consumer delay across all streams. This metric is +// in Private Preview. +const JobsHealthMetricStreamingBacklogSeconds JobsHealthMetric = `STREAMING_BACKLOG_SECONDS` + +// String representation for [fmt.Print] +func (f *JobsHealthMetric) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobsHealthMetric) Set(v string) error { + switch v { + case `RUN_DURATION_SECONDS`, `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_FILES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`: + *f = JobsHealthMetric(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "RUN_DURATION_SECONDS", "STREAMING_BACKLOG_BYTES", "STREAMING_BACKLOG_FILES", "STREAMING_BACKLOG_RECORDS", "STREAMING_BACKLOG_SECONDS"`, v) + } +} + +// Type always returns JobsHealthMetric to satisfy [pflag.Value] interface +func (f *JobsHealthMetric) Type() string { + return "JobsHealthMetric" +} + +// Specifies the operator used to compare the health metric value with the +// specified threshold. 
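+//
+// Illustrative sketch (not generated code): a health rule that flags runs
+// lasting longer than one hour, combining the metric and operator enums
+// defined in this file; the threshold value is a placeholder.
+//
+//    health := &JobsHealthRules{
+//        Rules: []JobsHealthRule{{
+//            Metric: JobsHealthMetricRunDurationSeconds,
+//            Op:     JobsHealthOperatorGreaterThan,
+//            Value:  types.Int64Value(3600),
+//        }},
+//    }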
+type JobsHealthOperator string + +const JobsHealthOperatorGreaterThan JobsHealthOperator = `GREATER_THAN` + +// String representation for [fmt.Print] +func (f *JobsHealthOperator) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *JobsHealthOperator) Set(v string) error { + switch v { + case `GREATER_THAN`: + *f = JobsHealthOperator(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GREATER_THAN"`, v) + } +} + +// Type always returns JobsHealthOperator to satisfy [pflag.Value] interface +func (f *JobsHealthOperator) Type() string { + return "JobsHealthOperator" +} + +type JobsHealthRule struct { + // Specifies the health metric that is being evaluated for a particular + // health rule. + // + // * `RUN_DURATION_SECONDS`: Expected total time for a run in seconds. * + // `STREAMING_BACKLOG_BYTES`: An estimate of the maximum bytes of data + // waiting to be consumed across all streams. This metric is in Private + // Preview. * `STREAMING_BACKLOG_RECORDS`: An estimate of the maximum offset + // lag across all streams. This metric is in Private Preview. * + // `STREAMING_BACKLOG_SECONDS`: An estimate of the maximum consumer delay + // across all streams. This metric is in Private Preview. * + // `STREAMING_BACKLOG_FILES`: An estimate of the maximum number of + // outstanding files across all streams. This metric is in Private Preview. + Metric JobsHealthMetric `tfsdk:"metric"` + // Specifies the operator used to compare the health metric value with the + // specified threshold. + Op JobsHealthOperator `tfsdk:"op"` + // Specifies the threshold value that the health metric should obey to + // satisfy the health rule. + Value types.Int64 `tfsdk:"value"` +} + +// An optional set of health rules that can be defined for this job. +type JobsHealthRules struct { + Rules []JobsHealthRule `tfsdk:"rules"` +} + +// List jobs +type ListJobsRequest struct { + // Whether to include task and cluster details in the response. + ExpandTasks types.Bool `tfsdk:"-" url:"expand_tasks,omitempty"` + // The number of jobs to return. This value must be greater than 0 and less + // or equal to 100. The default value is 20. + Limit types.Int64 `tfsdk:"-" url:"limit,omitempty"` + // A filter on the list based on the exact (case insensitive) job name. + Name types.String `tfsdk:"-" url:"name,omitempty"` + // The offset of the first job to return, relative to the most recently + // created job. Deprecated since June 2023. Use `page_token` to iterate + // through the pages instead. + Offset types.Int64 `tfsdk:"-" url:"offset,omitempty"` + // Use `next_page_token` or `prev_page_token` returned from the previous + // request to list the next or previous page of jobs respectively. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +// List of jobs was retrieved successfully. +type ListJobsResponse struct { + // If true, additional jobs matching the provided filter are available for + // listing. + HasMore types.Bool `tfsdk:"has_more"` + // The list of jobs. Only included in the response if there are jobs to + // list. + Jobs []BaseJob `tfsdk:"jobs"` + // A token that can be used to list the next page of jobs (if applicable). + NextPageToken types.String `tfsdk:"next_page_token"` + // A token that can be used to list the previous page of jobs (if + // applicable). 
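+	//
+	// Illustrative sketch (not generated code): token-based pagination over
+	// ListJobsRequest/ListJobsResponse; `listJobs` below is a hypothetical
+	// caller-side helper standing in for however the API is invoked.
+	//
+	//    var all []BaseJob
+	//    req := ListJobsRequest{Limit: types.Int64Value(100)}
+	//    for {
+	//        resp := listJobs(req) // hypothetical helper
+	//        all = append(all, resp.Jobs...)
+	//        if !resp.HasMore.ValueBool() {
+	//            break
+	//        }
+	//        req.PageToken = resp.NextPageToken
+	//    }
+	//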
+ PrevPageToken types.String `tfsdk:"prev_page_token"` +} + +// List job runs +type ListRunsRequest struct { + // If active_only is `true`, only active runs are included in the results; + // otherwise, lists both active and completed runs. An active run is a run + // in the `QUEUED`, `PENDING`, `RUNNING`, or `TERMINATING`. This field + // cannot be `true` when completed_only is `true`. + ActiveOnly types.Bool `tfsdk:"-" url:"active_only,omitempty"` + // If completed_only is `true`, only completed runs are included in the + // results; otherwise, lists both active and completed runs. This field + // cannot be `true` when active_only is `true`. + CompletedOnly types.Bool `tfsdk:"-" url:"completed_only,omitempty"` + // Whether to include task and cluster details in the response. + ExpandTasks types.Bool `tfsdk:"-" url:"expand_tasks,omitempty"` + // The job for which to list runs. If omitted, the Jobs service lists runs + // from all jobs. + JobId types.Int64 `tfsdk:"-" url:"job_id,omitempty"` + // The number of runs to return. This value must be greater than 0 and less + // than 25. The default value is 20. If a request specifies a limit of 0, + // the service instead uses the maximum limit. + Limit types.Int64 `tfsdk:"-" url:"limit,omitempty"` + // The offset of the first run to return, relative to the most recent run. + // Deprecated since June 2023. Use `page_token` to iterate through the pages + // instead. + Offset types.Int64 `tfsdk:"-" url:"offset,omitempty"` + // Use `next_page_token` or `prev_page_token` returned from the previous + // request to list the next or previous page of runs respectively. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // The type of runs to return. For a description of run types, see + // :method:jobs/getRun. + RunType RunType `tfsdk:"-" url:"run_type,omitempty"` + // Show runs that started _at or after_ this value. The value must be a UTC + // timestamp in milliseconds. Can be combined with _start_time_to_ to filter + // by a time range. + StartTimeFrom types.Int64 `tfsdk:"-" url:"start_time_from,omitempty"` + // Show runs that started _at or before_ this value. The value must be a UTC + // timestamp in milliseconds. Can be combined with _start_time_from_ to + // filter by a time range. + StartTimeTo types.Int64 `tfsdk:"-" url:"start_time_to,omitempty"` +} + +// List of runs was retrieved successfully. +type ListRunsResponse struct { + // If true, additional runs matching the provided filter are available for + // listing. + HasMore types.Bool `tfsdk:"has_more"` + // A token that can be used to list the next page of runs (if applicable). + NextPageToken types.String `tfsdk:"next_page_token"` + // A token that can be used to list the previous page of runs (if + // applicable). + PrevPageToken types.String `tfsdk:"prev_page_token"` + // A list of runs, from most recently started to least. Only included in the + // response if there are runs to list. + Runs []BaseRun `tfsdk:"runs"` +} + +type NotebookOutput struct { + // The value passed to + // [dbutils.notebook.exit()](/notebooks/notebook-workflows.html#notebook-workflows-exit). + // Databricks restricts this API to return the first 5 MB of the value. For + // a larger result, your job can store the results in a cloud storage + // service. This field is absent if `dbutils.notebook.exit()` was never + // called. + Result types.String `tfsdk:"result"` + // Whether or not the result was truncated. 
+ Truncated types.Bool `tfsdk:"truncated"` +} + +type NotebookTask struct { + // Base parameters to be used for each run of this job. If the run is + // initiated by a call to :method:jobs/run Now with parameters specified, + // the two parameters maps are merged. If the same key is specified in + // `base_parameters` and in `run-now`, the value from `run-now` is used. Use + // [Task parameter variables] to set parameters containing information about + // job runs. + // + // If the notebook takes a parameter that is not specified in the job’s + // `base_parameters` or the `run-now` override parameters, the default value + // from the notebook is used. + // + // Retrieve these parameters in a notebook using [dbutils.widgets.get]. + // + // The JSON representation of this field cannot exceed 1MB. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets + BaseParameters map[string]types.String `tfsdk:"base_parameters"` + // The path of the notebook to be run in the Databricks workspace or remote + // repository. For notebooks stored in the Databricks workspace, the path + // must be absolute and begin with a slash. For notebooks stored in a remote + // repository, the path must be relative. This field is required. + NotebookPath types.String `tfsdk:"notebook_path"` + // Optional location type of the notebook. When set to `WORKSPACE`, the + // notebook will be retrieved from the local Databricks workspace. When set + // to `GIT`, the notebook will be retrieved from a Git repository defined in + // `git_source`. If the value is empty, the task will use `GIT` if + // `git_source` is defined and `WORKSPACE` otherwise. * `WORKSPACE`: + // Notebook is located in Databricks workspace. * `GIT`: Notebook is located + // in cloud Git provider. + Source Source `tfsdk:"source"` + // Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic + // SQL warehouses are NOT supported, please use serverless or pro SQL + // warehouses. + // + // Note that SQL warehouses only support SQL cells; if the notebook contains + // non-SQL cells, the run will fail. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type PauseStatus string + +const PauseStatusPaused PauseStatus = `PAUSED` + +const PauseStatusUnpaused PauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (f *PauseStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *f = PauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns PauseStatus to satisfy [pflag.Value] interface +func (f *PauseStatus) Type() string { + return "PauseStatus" +} + +type PeriodicTriggerConfiguration struct { + // The interval at which the trigger should run. + Interval types.Int64 `tfsdk:"interval"` + // The unit of time for the interval. 
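+	//
+	// Illustrative sketch (not generated code): a trigger that fires every four
+	// hours, using the time-unit enum defined below.
+	//
+	//    every4h := PeriodicTriggerConfiguration{
+	//        Interval: types.Int64Value(4),
+	//        Unit:     PeriodicTriggerConfigurationTimeUnitHours,
+	//    }
+	//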
+ Unit PeriodicTriggerConfigurationTimeUnit `tfsdk:"unit"` +} + +type PeriodicTriggerConfigurationTimeUnit string + +const PeriodicTriggerConfigurationTimeUnitDays PeriodicTriggerConfigurationTimeUnit = `DAYS` + +const PeriodicTriggerConfigurationTimeUnitHours PeriodicTriggerConfigurationTimeUnit = `HOURS` + +const PeriodicTriggerConfigurationTimeUnitTimeUnitUnspecified PeriodicTriggerConfigurationTimeUnit = `TIME_UNIT_UNSPECIFIED` + +const PeriodicTriggerConfigurationTimeUnitWeeks PeriodicTriggerConfigurationTimeUnit = `WEEKS` + +// String representation for [fmt.Print] +func (f *PeriodicTriggerConfigurationTimeUnit) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PeriodicTriggerConfigurationTimeUnit) Set(v string) error { + switch v { + case `DAYS`, `HOURS`, `TIME_UNIT_UNSPECIFIED`, `WEEKS`: + *f = PeriodicTriggerConfigurationTimeUnit(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DAYS", "HOURS", "TIME_UNIT_UNSPECIFIED", "WEEKS"`, v) + } +} + +// Type always returns PeriodicTriggerConfigurationTimeUnit to satisfy [pflag.Value] interface +func (f *PeriodicTriggerConfigurationTimeUnit) Type() string { + return "PeriodicTriggerConfigurationTimeUnit" +} + +type PipelineParams struct { + // If true, triggers a full refresh on the delta live table. + FullRefresh types.Bool `tfsdk:"full_refresh"` +} + +type PipelineTask struct { + // If true, triggers a full refresh on the delta live table. + FullRefresh types.Bool `tfsdk:"full_refresh"` + // The full name of the pipeline task to execute. + PipelineId types.String `tfsdk:"pipeline_id"` +} + +type PythonWheelTask struct { + // Named entry point to use, if it does not exist in the metadata of the + // package it executes the function from the package directly using + // `$packageName.$entryPoint()` + EntryPoint types.String `tfsdk:"entry_point"` + // Command-line parameters passed to Python wheel task in the form of + // `["--name=task", "--data=dbfs:/path/to/data.json"]`. Leave it empty if + // `parameters` is not null. + NamedParameters map[string]types.String `tfsdk:"named_parameters"` + // Name of the package to execute + PackageName types.String `tfsdk:"package_name"` + // Command-line parameters passed to Python wheel task. Leave it empty if + // `named_parameters` is not null. + Parameters []types.String `tfsdk:"parameters"` +} + +type QueueSettings struct { + // If true, enable queueing for the job. This is a required field. + Enabled types.Bool `tfsdk:"enabled"` +} + +type RepairHistoryItem struct { + // The end time of the (repaired) run. + EndTime types.Int64 `tfsdk:"end_time"` + // The ID of the repair. Only returned for the items that represent a repair + // in `repair_history`. + Id types.Int64 `tfsdk:"id"` + // The start time of the (repaired) run. + StartTime types.Int64 `tfsdk:"start_time"` + // The current state of the run. + State *RunState `tfsdk:"state"` + // The run IDs of the task runs that ran as part of this repair history + // item. + TaskRunIds []types.Int64 `tfsdk:"task_run_ids"` + // The repair history item type. Indicates whether a run is the original run + // or a repair run. + Type RepairHistoryItemType `tfsdk:"type"` +} + +// The repair history item type. Indicates whether a run is the original run or +// a repair run. 
+type RepairHistoryItemType string + +const RepairHistoryItemTypeOriginal RepairHistoryItemType = `ORIGINAL` + +const RepairHistoryItemTypeRepair RepairHistoryItemType = `REPAIR` + +// String representation for [fmt.Print] +func (f *RepairHistoryItemType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RepairHistoryItemType) Set(v string) error { + switch v { + case `ORIGINAL`, `REPAIR`: + *f = RepairHistoryItemType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ORIGINAL", "REPAIR"`, v) + } +} + +// Type always returns RepairHistoryItemType to satisfy [pflag.Value] interface +func (f *RepairHistoryItemType) Type() string { + return "RepairHistoryItemType" +} + +type RepairRun struct { + // An array of commands to execute for jobs with the dbt task, for example + // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt + // run"]` + DbtCommands []types.String `tfsdk:"dbt_commands"` + // A list of parameters for jobs with Spark JAR tasks, for example + // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the + // main function of the main class specified in the Spark JAR task. If not + // specified upon `run-now`, it defaults to an empty list. jar_params cannot + // be specified in conjunction with notebook_params. The JSON representation + // of this field (for example `{"jar_params":["john doe","35"]}`) cannot + // exceed 10,000 bytes. + // + // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set + // parameters containing information about job runs. + JarParams []types.String `tfsdk:"jar_params"` + // Job-level parameters used in the run. for example `"param": + // "overriding_val"` + JobParameters map[string]types.String `tfsdk:"job_parameters"` + // The ID of the latest repair. This parameter is not required when + // repairing a run for the first time, but must be provided on subsequent + // requests to repair the same run. + LatestRepairId types.Int64 `tfsdk:"latest_repair_id"` + // A map from keys to values for jobs with notebook task, for example + // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed + // to the notebook and is accessible through the [dbutils.widgets.get] + // function. + // + // If not specified upon `run-now`, the triggered run uses the job’s base + // parameters. + // + // notebook_params cannot be specified in conjunction with jar_params. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // The JSON representation of this field (for example + // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed + // 10,000 bytes. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + NotebookParams map[string]types.String `tfsdk:"notebook_params"` + + PipelineParams *PipelineParams `tfsdk:"pipeline_params"` + + PythonNamedParams map[string]types.String `tfsdk:"python_named_params"` + // A list of parameters for jobs with Python tasks, for example + // `"python_params": ["john doe", "35"]`. The parameters are passed to + // Python file as command-line parameters. If specified upon `run-now`, it + // would overwrite the parameters specified in job setting. The JSON + // representation of this field (for example `{"python_params":["john + // doe","35"]}`) cannot exceed 10,000 bytes. 
+ // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + PythonParams []types.String `tfsdk:"python_params"` + // If true, repair all failed tasks. Only one of `rerun_tasks` or + // `rerun_all_failed_tasks` can be used. + RerunAllFailedTasks types.Bool `tfsdk:"rerun_all_failed_tasks"` + // If true, repair all tasks that depend on the tasks in `rerun_tasks`, even + // if they were previously successful. Can be also used in combination with + // `rerun_all_failed_tasks`. + RerunDependentTasks types.Bool `tfsdk:"rerun_dependent_tasks"` + // The task keys of the task runs to repair. + RerunTasks []types.String `tfsdk:"rerun_tasks"` + // The job run ID of the run to repair. The run must not be in progress. + RunId types.Int64 `tfsdk:"run_id"` + // A list of parameters for jobs with spark submit task, for example + // `"spark_submit_params": ["--class", + // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to + // spark-submit script as command-line parameters. If specified upon + // `run-now`, it would overwrite the parameters specified in job setting. + // The JSON representation of this field (for example + // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + SparkSubmitParams []types.String `tfsdk:"spark_submit_params"` + // A map from keys to values for jobs with SQL task, for example + // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task + // does not support custom parameters. + SqlParams map[string]types.String `tfsdk:"sql_params"` +} + +// Run repair was initiated. +type RepairRunResponse struct { + // The ID of the repair. Must be provided in subsequent repairs using the + // `latest_repair_id` field to ensure sequential repairs. + RepairId types.Int64 `tfsdk:"repair_id"` +} + +type ResetJob struct { + // The canonical identifier of the job to reset. This field is required. + JobId types.Int64 `tfsdk:"job_id"` + // The new settings of the job. These settings completely replace the old + // settings. + // + // Changes to the field `JobBaseSettings.timeout_seconds` are applied to + // active runs. Changes to other fields are applied to future runs only. 
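+	//
+	// Illustrative sketch (not generated code): because reset replaces the
+	// settings wholesale, every field that should survive must be restated; the
+	// job ID and name below are placeholders.
+	//
+	//    reset := ResetJob{
+	//        JobId: types.Int64Value(123),
+	//        NewSettings: JobSettings{
+	//            Name:              types.StringValue("nightly-etl"),
+	//            MaxConcurrentRuns: types.Int64Value(1),
+	//            TimeoutSeconds:    types.Int64Value(3600),
+	//        },
+	//    }
+	//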
+ NewSettings JobSettings `tfsdk:"new_settings"` +} + +type ResetResponse struct { +} + +type ResolvedConditionTaskValues struct { + Left types.String `tfsdk:"left"` + + Right types.String `tfsdk:"right"` +} + +type ResolvedDbtTaskValues struct { + Commands []types.String `tfsdk:"commands"` +} + +type ResolvedNotebookTaskValues struct { + BaseParameters map[string]types.String `tfsdk:"base_parameters"` +} + +type ResolvedParamPairValues struct { + Parameters map[string]types.String `tfsdk:"parameters"` +} + +type ResolvedPythonWheelTaskValues struct { + NamedParameters map[string]types.String `tfsdk:"named_parameters"` + + Parameters []types.String `tfsdk:"parameters"` +} + +type ResolvedRunJobTaskValues struct { + JobParameters map[string]types.String `tfsdk:"job_parameters"` + + Parameters map[string]types.String `tfsdk:"parameters"` +} + +type ResolvedStringParamsValues struct { + Parameters []types.String `tfsdk:"parameters"` +} + +type ResolvedValues struct { + ConditionTask *ResolvedConditionTaskValues `tfsdk:"condition_task"` + + DbtTask *ResolvedDbtTaskValues `tfsdk:"dbt_task"` + + NotebookTask *ResolvedNotebookTaskValues `tfsdk:"notebook_task"` + + PythonWheelTask *ResolvedPythonWheelTaskValues `tfsdk:"python_wheel_task"` + + RunJobTask *ResolvedRunJobTaskValues `tfsdk:"run_job_task"` + + SimulationTask *ResolvedParamPairValues `tfsdk:"simulation_task"` + + SparkJarTask *ResolvedStringParamsValues `tfsdk:"spark_jar_task"` + + SparkPythonTask *ResolvedStringParamsValues `tfsdk:"spark_python_task"` + + SparkSubmitTask *ResolvedStringParamsValues `tfsdk:"spark_submit_task"` + + SqlTask *ResolvedParamPairValues `tfsdk:"sql_task"` +} + +// Run was retrieved successfully +type Run struct { + // The sequence number of this run attempt for a triggered job run. The + // initial attempt of a run has an attempt_number of 0\. If the initial run + // attempt fails, and the job has a retry policy (`max_retries` \> 0), + // subsequent runs are created with an `original_attempt_run_id` of the + // original attempt’s ID and an incrementing `attempt_number`. Runs are + // retried only until they succeed, and the maximum `attempt_number` is the + // same as the `max_retries` value for the job. + AttemptNumber types.Int64 `tfsdk:"attempt_number"` + // The time in milliseconds it took to terminate the cluster and clean up + // any associated artifacts. The duration of a task run is the sum of the + // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The + // `cleanup_duration` field is set to 0 for multitask job runs. The total + // duration of a multitask job run is the value of the `run_duration` field. + CleanupDuration types.Int64 `tfsdk:"cleanup_duration"` + // The cluster used for this run. If the run is specified to use a new + // cluster, this field is set once the Jobs service has requested a cluster + // for the run. + ClusterInstance *ClusterInstance `tfsdk:"cluster_instance"` + // A snapshot of the job’s cluster specification when this run was + // created. + ClusterSpec *ClusterSpec `tfsdk:"cluster_spec"` + // The creator user name. This field won’t be included in the response if + // the user has already been deleted. + CreatorUserName types.String `tfsdk:"creator_user_name"` + // Description of the run + Description types.String `tfsdk:"description"` + // The time at which this run ended in epoch milliseconds (milliseconds + // since 1/1/1970 UTC). This field is set to 0 if the job is still running. 
+ EndTime types.Int64 `tfsdk:"end_time"` + // The time in milliseconds it took to execute the commands in the JAR or + // notebook until they completed, failed, timed out, were cancelled, or + // encountered an unexpected error. The duration of a task run is the sum of + // the `setup_duration`, `execution_duration`, and the `cleanup_duration`. + // The `execution_duration` field is set to 0 for multitask job runs. The + // total duration of a multitask job run is the value of the `run_duration` + // field. + ExecutionDuration types.Int64 `tfsdk:"execution_duration"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. + // + // If `git_source` is set, these tasks retrieve the file from the remote + // repository by default. However, this behavior can be overridden by + // setting `source` to `WORKSPACE` on the task. + // + // Note: dbt and SQL File tasks support only version-controlled sources. If + // dbt or SQL File tasks are used, `git_source` must be defined on the job. + GitSource *GitSource `tfsdk:"git_source"` + // A list of job cluster specifications that can be shared and reused by + // tasks of this job. Libraries cannot be declared in a shared job cluster. + // You must declare dependent libraries in task settings. + JobClusters []JobCluster `tfsdk:"job_clusters"` + // The canonical identifier of the job that contains this run. + JobId types.Int64 `tfsdk:"job_id"` + // Job-level parameters used in the run + JobParameters []JobParameter `tfsdk:"job_parameters"` + // A unique identifier for this job run. This is set to the same value as + // `run_id`. + NumberInJob types.Int64 `tfsdk:"number_in_job"` + // If this run is a retry of a prior run attempt, this field contains the + // run_id of the original attempt; otherwise, it is the same as the run_id. + OriginalAttemptRunId types.Int64 `tfsdk:"original_attempt_run_id"` + // The parameters used for this run. + OverridingParameters *RunParameters `tfsdk:"overriding_parameters"` + // The time in milliseconds that the run has spent in the queue. + QueueDuration types.Int64 `tfsdk:"queue_duration"` + // The repair history of the run. + RepairHistory []RepairHistoryItem `tfsdk:"repair_history"` + // The time in milliseconds it took the job run and all of its repairs to + // finish. + RunDuration types.Int64 `tfsdk:"run_duration"` + // The canonical identifier of the run. This ID is unique across all runs of + // all jobs. + RunId types.Int64 `tfsdk:"run_id"` + // An optional name for the run. The maximum length is 4096 bytes in UTF-8 + // encoding. + RunName types.String `tfsdk:"run_name"` + // The URL to the detail page of the run. + RunPageUrl types.String `tfsdk:"run_page_url"` + // The type of a run. * `JOB_RUN`: Normal job run. A run created with + // :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with + // [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with + // :method:jobs/submit. + // + // [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow + RunType RunType `tfsdk:"run_type"` + // The cron schedule that triggered this run if it was triggered by the + // periodic scheduler. + Schedule *CronSchedule `tfsdk:"schedule"` + // The time in milliseconds it took to set up the cluster. 
For runs that run
+ // on new clusters this is the cluster creation time, for runs that run on
+ // existing clusters this time should be very short. The duration of a task
+ // run is the sum of the `setup_duration`, `execution_duration`, and the
+ // `cleanup_duration`. The `setup_duration` field is set to 0 for multitask
+ // job runs. The total duration of a multitask job run is the value of the
+ // `run_duration` field.
+ SetupDuration types.Int64 `tfsdk:"setup_duration"`
+ // The time at which this run was started in epoch milliseconds
+ // (milliseconds since 1/1/1970 UTC). This may not be the time when the job
+ // task starts executing, for example, if the job is scheduled to run on a
+ // new cluster, this is the time the cluster creation call is issued.
+ StartTime types.Int64 `tfsdk:"start_time"`
+ // The current state of the run.
+ State *RunState `tfsdk:"state"`
+ // The list of tasks performed by the run. Each task has its own `run_id`
+ // which you can use to call `JobsGetOutput` to retrieve the run results.
+ Tasks []RunTask `tfsdk:"tasks"`
+ // The type of trigger that fired this run.
+ //
+ // * `PERIODIC`: Schedules that periodically trigger runs, such as a cron
+ // scheduler. * `ONE_TIME`: One time triggers that fire a single run. This
+ // occurs when you trigger a single run on demand through the UI or the API. *
+ // `RETRY`: Indicates a run that is triggered as a retry of a previously
+ // failed run. This occurs when you request to re-run the job in case of
+ // failures. * `RUN_JOB_TASK`: Indicates a run that is triggered using a Run
+ // Job task. * `FILE_ARRIVAL`: Indicates a run that is triggered by a file
+ // arrival. * `TABLE`: Indicates a run that is triggered by a table update.
+ Trigger TriggerType `tfsdk:"trigger"`
+ // Additional details about what triggered the run
+ TriggerInfo *TriggerInfo `tfsdk:"trigger_info"`
+}
+
+type RunConditionTask struct {
+ // The left operand of the condition task. Can be either a string value or a
+ // job state or parameter reference.
+ Left types.String `tfsdk:"left"`
+ // * `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their
+ // operands. This means that `“12.0” == “12”` will evaluate to
+ // `false`. * `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`,
+ // `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their
+ // operands. `“12.0” >= “12”` will evaluate to `true`, `“10.0”
+ // >= “12”` will evaluate to `false`.
+ //
+ // The boolean comparison to task values can be implemented with operators
+ // `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it
+ // will be serialized to `“true”` or `“false”` for the comparison.
+ Op ConditionTaskOp `tfsdk:"op"`
+ // The condition expression evaluation result. Filled in if the task was
+ // successfully completed. Can be `"true"` or `"false"`
+ Outcome types.String `tfsdk:"outcome"`
+ // The right operand of the condition task. Can be either a string value or
+ // a job state or parameter reference.
+ Right types.String `tfsdk:"right"`
+}
+
+type RunForEachTask struct {
+ // Controls the number of active iterations task runs. Default is 20,
+ // maximum allowed is 100.
+ Concurrency types.Int64 `tfsdk:"concurrency"`
+ // Array for task to iterate on. This can be a JSON string or a reference to
+ // an array parameter.
+ Inputs types.String `tfsdk:"inputs"`
+ // Read only field.
Populated for GetRun and ListRuns RPC calls and stores
+ // the execution stats of a For each task
+ Stats *ForEachStats `tfsdk:"stats"`
+ // Configuration for the task that will be run for each element in the array
+ Task Task `tfsdk:"task"`
+}
+
+// An optional value indicating the condition that determines whether the task
+// should be run once its dependencies have been completed. When omitted,
+// defaults to `ALL_SUCCESS`.
+//
+// Possible values are: * `ALL_SUCCESS`: All dependencies have executed and
+// succeeded * `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded *
+// `NONE_FAILED`: None of the dependencies have failed and at least one was
+// executed * `ALL_DONE`: All dependencies have been completed *
+// `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: All
+// dependencies have failed
+type RunIf string
+
+// All dependencies have been completed
+const RunIfAllDone RunIf = `ALL_DONE`
+
+// All dependencies have failed
+const RunIfAllFailed RunIf = `ALL_FAILED`
+
+// All dependencies have executed and succeeded
+const RunIfAllSuccess RunIf = `ALL_SUCCESS`
+
+// At least one dependency failed
+const RunIfAtLeastOneFailed RunIf = `AT_LEAST_ONE_FAILED`
+
+// At least one dependency has succeeded
+const RunIfAtLeastOneSuccess RunIf = `AT_LEAST_ONE_SUCCESS`
+
+// None of the dependencies have failed and at least one was executed
+const RunIfNoneFailed RunIf = `NONE_FAILED`
+
+// String representation for [fmt.Print]
+func (f *RunIf) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *RunIf) Set(v string) error {
+ switch v {
+ case `ALL_DONE`, `ALL_FAILED`, `ALL_SUCCESS`, `AT_LEAST_ONE_FAILED`, `AT_LEAST_ONE_SUCCESS`, `NONE_FAILED`:
+ *f = RunIf(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ALL_DONE", "ALL_FAILED", "ALL_SUCCESS", "AT_LEAST_ONE_FAILED", "AT_LEAST_ONE_SUCCESS", "NONE_FAILED"`, v)
+ }
+}
+
+// Type always returns RunIf to satisfy [pflag.Value] interface
+func (f *RunIf) Type() string {
+ return "RunIf"
+}
+
+type RunJobOutput struct {
+ // The run id of the triggered job run
+ RunId types.Int64 `tfsdk:"run_id"`
+}
+
+type RunJobTask struct {
+ // An array of commands to execute for jobs with the dbt task, for example
+ // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt
+ // run"]`
+ DbtCommands []types.String `tfsdk:"dbt_commands"`
+ // A list of parameters for jobs with Spark JAR tasks, for example
+ // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the
+ // main function of the main class specified in the Spark JAR task. If not
+ // specified upon `run-now`, it defaults to an empty list. jar_params cannot
+ // be specified in conjunction with notebook_params. The JSON representation
+ // of this field (for example `{"jar_params":["john doe","35"]}`) cannot
+ // exceed 10,000 bytes.
+ //
+ // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set
+ // parameters containing information about job runs.
+ JarParams []types.String `tfsdk:"jar_params"`
+ // ID of the job to trigger.
+ JobId types.Int64 `tfsdk:"job_id"`
+ // Job-level parameters used to trigger the job.
+ JobParameters map[string]types.String `tfsdk:"job_parameters"`
+ // A map from keys to values for jobs with notebook task, for example
+ // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed
+ // to the notebook and is accessible through the [dbutils.widgets.get]
+ // function.
+ // + // If not specified upon `run-now`, the triggered run uses the job’s base + // parameters. + // + // notebook_params cannot be specified in conjunction with jar_params. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // The JSON representation of this field (for example + // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed + // 10,000 bytes. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + NotebookParams map[string]types.String `tfsdk:"notebook_params"` + + PipelineParams *PipelineParams `tfsdk:"pipeline_params"` + + PythonNamedParams map[string]types.String `tfsdk:"python_named_params"` + // A list of parameters for jobs with Python tasks, for example + // `"python_params": ["john doe", "35"]`. The parameters are passed to + // Python file as command-line parameters. If specified upon `run-now`, it + // would overwrite the parameters specified in job setting. The JSON + // representation of this field (for example `{"python_params":["john + // doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + PythonParams []types.String `tfsdk:"python_params"` + // A list of parameters for jobs with spark submit task, for example + // `"spark_submit_params": ["--class", + // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to + // spark-submit script as command-line parameters. If specified upon + // `run-now`, it would overwrite the parameters specified in job setting. + // The JSON representation of this field (for example + // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + SparkSubmitParams []types.String `tfsdk:"spark_submit_params"` + // A map from keys to values for jobs with SQL task, for example + // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task + // does not support custom parameters. + SqlParams map[string]types.String `tfsdk:"sql_params"` +} + +// A value indicating the run's lifecycle state. The possible values are: * +// `QUEUED`: The run is queued. * `PENDING`: The run is waiting to be executed +// while the cluster and execution context are being prepared. * `RUNNING`: The +// task of this run is being executed. * `TERMINATING`: The task of this run has +// completed, and the cluster and execution context are being cleaned up. * +// `TERMINATED`: The task of this run has completed, and the cluster and +// execution context have been cleaned up. This state is terminal. 
* `SKIPPED`: +// This run was aborted because a previous run of the same job was already +// active. This state is terminal. * `INTERNAL_ERROR`: An exceptional state that +// indicates a failure in the Jobs service, such as network failure over a long +// period. If a run on a new cluster ends in the `INTERNAL_ERROR` state, the +// Jobs service terminates the cluster as soon as possible. This state is +// terminal. * `BLOCKED`: The run is blocked on an upstream dependency. * +// `WAITING_FOR_RETRY`: The run is waiting for a retry. +type RunLifeCycleState string + +// The run is blocked on an upstream dependency. +const RunLifeCycleStateBlocked RunLifeCycleState = `BLOCKED` + +// An exceptional state that indicates a failure in the Jobs service, such as +// network failure over a long period. If a run on a new cluster ends in the +// `INTERNAL_ERROR` state, the Jobs service terminates the cluster as soon as +// possible. This state is terminal. +const RunLifeCycleStateInternalError RunLifeCycleState = `INTERNAL_ERROR` + +// The run is waiting to be executed while the cluster and execution context are +// being prepared. +const RunLifeCycleStatePending RunLifeCycleState = `PENDING` + +// The run is queued. +const RunLifeCycleStateQueued RunLifeCycleState = `QUEUED` + +// The task of this run is being executed. +const RunLifeCycleStateRunning RunLifeCycleState = `RUNNING` + +// This run was aborted because a previous run of the same job was already +// active. This state is terminal. +const RunLifeCycleStateSkipped RunLifeCycleState = `SKIPPED` + +// The task of this run has completed, and the cluster and execution context +// have been cleaned up. This state is terminal. +const RunLifeCycleStateTerminated RunLifeCycleState = `TERMINATED` + +// The task of this run has completed, and the cluster and execution context are +// being cleaned up. +const RunLifeCycleStateTerminating RunLifeCycleState = `TERMINATING` + +// The run is waiting for a retry. +const RunLifeCycleStateWaitingForRetry RunLifeCycleState = `WAITING_FOR_RETRY` + +// String representation for [fmt.Print] +func (f *RunLifeCycleState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunLifeCycleState) Set(v string) error { + switch v { + case `BLOCKED`, `INTERNAL_ERROR`, `PENDING`, `QUEUED`, `RUNNING`, `SKIPPED`, `TERMINATED`, `TERMINATING`, `WAITING_FOR_RETRY`: + *f = RunLifeCycleState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BLOCKED", "INTERNAL_ERROR", "PENDING", "QUEUED", "RUNNING", "SKIPPED", "TERMINATED", "TERMINATING", "WAITING_FOR_RETRY"`, v) + } +} + +// Type always returns RunLifeCycleState to satisfy [pflag.Value] interface +func (f *RunLifeCycleState) Type() string { + return "RunLifeCycleState" +} + +type RunNow struct { + // An array of commands to execute for jobs with the dbt task, for example + // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt + // run"]` + DbtCommands []types.String `tfsdk:"dbt_commands"` + // An optional token to guarantee the idempotency of job run requests. If a + // run with the provided token already exists, the request does not create a + // new run but returns the ID of the existing run instead. If a run with the + // provided token is deleted, an error is returned. + // + // If you specify the idempotency token, upon failure you can retry until + // the request succeeds. Databricks guarantees that exactly one run is + // launched with that idempotency token. 
+ // + // This token must have at most 64 characters. + // + // For more information, see [How to ensure idempotency for jobs]. + // + // [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html + IdempotencyToken types.String `tfsdk:"idempotency_token"` + // A list of parameters for jobs with Spark JAR tasks, for example + // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the + // main function of the main class specified in the Spark JAR task. If not + // specified upon `run-now`, it defaults to an empty list. jar_params cannot + // be specified in conjunction with notebook_params. The JSON representation + // of this field (for example `{"jar_params":["john doe","35"]}`) cannot + // exceed 10,000 bytes. + // + // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set + // parameters containing information about job runs. + JarParams []types.String `tfsdk:"jar_params"` + // The ID of the job to be executed + JobId types.Int64 `tfsdk:"job_id"` + // Job-level parameters used in the run. for example `"param": + // "overriding_val"` + JobParameters map[string]types.String `tfsdk:"job_parameters"` + // A map from keys to values for jobs with notebook task, for example + // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed + // to the notebook and is accessible through the [dbutils.widgets.get] + // function. + // + // If not specified upon `run-now`, the triggered run uses the job’s base + // parameters. + // + // notebook_params cannot be specified in conjunction with jar_params. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // The JSON representation of this field (for example + // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed + // 10,000 bytes. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + NotebookParams map[string]types.String `tfsdk:"notebook_params"` + + PipelineParams *PipelineParams `tfsdk:"pipeline_params"` + + PythonNamedParams map[string]types.String `tfsdk:"python_named_params"` + // A list of parameters for jobs with Python tasks, for example + // `"python_params": ["john doe", "35"]`. The parameters are passed to + // Python file as command-line parameters. If specified upon `run-now`, it + // would overwrite the parameters specified in job setting. The JSON + // representation of this field (for example `{"python_params":["john + // doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + PythonParams []types.String `tfsdk:"python_params"` + // The queue settings of the run. + Queue *QueueSettings `tfsdk:"queue"` + // A list of parameters for jobs with spark submit task, for example + // `"spark_submit_params": ["--class", + // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to + // spark-submit script as command-line parameters. If specified upon + // `run-now`, it would overwrite the parameters specified in job setting. 
+ // The JSON representation of this field (for example + // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + SparkSubmitParams []types.String `tfsdk:"spark_submit_params"` + // A map from keys to values for jobs with SQL task, for example + // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task + // does not support custom parameters. + SqlParams map[string]types.String `tfsdk:"sql_params"` +} + +// Run was started successfully. +type RunNowResponse struct { + // A unique identifier for this job run. This is set to the same value as + // `run_id`. + NumberInJob types.Int64 `tfsdk:"number_in_job"` + // The globally unique ID of the newly triggered run. + RunId types.Int64 `tfsdk:"run_id"` +} + +// Run output was retrieved successfully. +type RunOutput struct { + // The output of a dbt task, if available. + DbtOutput *DbtOutput `tfsdk:"dbt_output"` + // An error message indicating why a task failed or why output is not + // available. The message is unstructured, and its exact format is subject + // to change. + Error types.String `tfsdk:"error"` + // If there was an error executing the run, this field contains any + // available stack traces. + ErrorTrace types.String `tfsdk:"error_trace"` + + Info types.String `tfsdk:"info"` + // The output from tasks that write to standard streams (stdout/stderr) such + // as spark_jar_task, spark_python_task, python_wheel_task. + // + // It's not supported for the notebook_task, pipeline_task or + // spark_submit_task. + // + // Databricks restricts this API to return the last 5 MB of these logs. + Logs types.String `tfsdk:"logs"` + // Whether the logs are truncated. + LogsTruncated types.Bool `tfsdk:"logs_truncated"` + // All details of the run except for its output. + Metadata *Run `tfsdk:"metadata"` + // The output of a notebook task, if available. A notebook task that + // terminates (either successfully or with a failure) without calling + // `dbutils.notebook.exit()` is considered to have an empty output. This + // field is set but its result value is empty. Databricks restricts this API + // to return the first 5 MB of the output. To return a larger result, use + // the [ClusterLogConf] field to configure log storage for the job cluster. + // + // [ClusterLogConf]: https://docs.databricks.com/dev-tools/api/latest/clusters.html#clusterlogconf + NotebookOutput *NotebookOutput `tfsdk:"notebook_output"` + // The output of a run job task, if available + RunJobOutput *RunJobOutput `tfsdk:"run_job_output"` + // The output of a SQL task, if available. + SqlOutput *SqlOutput `tfsdk:"sql_output"` +} + +type RunParameters struct { + // An array of commands to execute for jobs with the dbt task, for example + // `"dbt_commands": ["dbt deps", "dbt seed", "dbt deps", "dbt seed", "dbt + // run"]` + DbtCommands []types.String `tfsdk:"dbt_commands"` + // A list of parameters for jobs with Spark JAR tasks, for example + // `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the + // main function of the main class specified in the Spark JAR task. 
If not + // specified upon `run-now`, it defaults to an empty list. jar_params cannot + // be specified in conjunction with notebook_params. The JSON representation + // of this field (for example `{"jar_params":["john doe","35"]}`) cannot + // exceed 10,000 bytes. + // + // Use [Task parameter variables](/jobs.html\"#parameter-variables\") to set + // parameters containing information about job runs. + JarParams []types.String `tfsdk:"jar_params"` + // A map from keys to values for jobs with notebook task, for example + // `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed + // to the notebook and is accessible through the [dbutils.widgets.get] + // function. + // + // If not specified upon `run-now`, the triggered run uses the job’s base + // parameters. + // + // notebook_params cannot be specified in conjunction with jar_params. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // The JSON representation of this field (for example + // `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed + // 10,000 bytes. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html + NotebookParams map[string]types.String `tfsdk:"notebook_params"` + + PipelineParams *PipelineParams `tfsdk:"pipeline_params"` + + PythonNamedParams map[string]types.String `tfsdk:"python_named_params"` + // A list of parameters for jobs with Python tasks, for example + // `"python_params": ["john doe", "35"]`. The parameters are passed to + // Python file as command-line parameters. If specified upon `run-now`, it + // would overwrite the parameters specified in job setting. The JSON + // representation of this field (for example `{"python_params":["john + // doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + PythonParams []types.String `tfsdk:"python_params"` + // A list of parameters for jobs with spark submit task, for example + // `"spark_submit_params": ["--class", + // "org.apache.spark.examples.SparkPi"]`. The parameters are passed to + // spark-submit script as command-line parameters. If specified upon + // `run-now`, it would overwrite the parameters specified in job setting. + // The JSON representation of this field (for example + // `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs + // + // Important + // + // These parameters accept only Latin characters (ASCII character set). + // Using non-ASCII characters returns an error. Examples of invalid, + // non-ASCII characters are Chinese, Japanese kanjis, and emojis. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + SparkSubmitParams []types.String `tfsdk:"spark_submit_params"` + // A map from keys to values for jobs with SQL task, for example + // `"sql_params": {"name": "john doe", "age": "35"}`. The SQL alert task + // does not support custom parameters. 
+ SqlParams map[string]types.String `tfsdk:"sql_params"` +} + +// A value indicating the run's result. The possible values are: * `SUCCESS`: +// The task completed successfully. * `FAILED`: The task completed with an +// error. * `TIMEDOUT`: The run was stopped after reaching the timeout. * +// `CANCELED`: The run was canceled at user request. * +// `MAXIMUM_CONCURRENT_RUNS_REACHED`: The run was skipped because the maximum +// concurrent runs were reached. * `EXCLUDED`: The run was skipped because the +// necessary conditions were not met. * `SUCCESS_WITH_FAILURES`: The job run +// completed successfully with some failures; leaf tasks were successful. * +// `UPSTREAM_FAILED`: The run was skipped because of an upstream failure. * +// `UPSTREAM_CANCELED`: The run was skipped because an upstream task was +// canceled. +type RunResultState string + +// The run was canceled at user request. +const RunResultStateCanceled RunResultState = `CANCELED` + +// The run was skipped because the necessary conditions were not met. +const RunResultStateExcluded RunResultState = `EXCLUDED` + +// The task completed with an error. +const RunResultStateFailed RunResultState = `FAILED` + +// The run was skipped because the maximum concurrent runs were reached. +const RunResultStateMaximumConcurrentRunsReached RunResultState = `MAXIMUM_CONCURRENT_RUNS_REACHED` + +// The task completed successfully. +const RunResultStateSuccess RunResultState = `SUCCESS` + +// The job run completed successfully with some failures; leaf tasks were +// successful. +const RunResultStateSuccessWithFailures RunResultState = `SUCCESS_WITH_FAILURES` + +// The run was stopped after reaching the timeout. +const RunResultStateTimedout RunResultState = `TIMEDOUT` + +// The run was skipped because an upstream task was canceled. +const RunResultStateUpstreamCanceled RunResultState = `UPSTREAM_CANCELED` + +// The run was skipped because of an upstream failure. +const RunResultStateUpstreamFailed RunResultState = `UPSTREAM_FAILED` + +// String representation for [fmt.Print] +func (f *RunResultState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunResultState) Set(v string) error { + switch v { + case `CANCELED`, `EXCLUDED`, `FAILED`, `MAXIMUM_CONCURRENT_RUNS_REACHED`, `SUCCESS`, `SUCCESS_WITH_FAILURES`, `TIMEDOUT`, `UPSTREAM_CANCELED`, `UPSTREAM_FAILED`: + *f = RunResultState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "EXCLUDED", "FAILED", "MAXIMUM_CONCURRENT_RUNS_REACHED", "SUCCESS", "SUCCESS_WITH_FAILURES", "TIMEDOUT", "UPSTREAM_CANCELED", "UPSTREAM_FAILED"`, v) + } +} + +// Type always returns RunResultState to satisfy [pflag.Value] interface +func (f *RunResultState) Type() string { + return "RunResultState" +} + +// The current state of the run. +type RunState struct { + // A value indicating the run's current lifecycle state. This field is + // always available in the response. + LifeCycleState RunLifeCycleState `tfsdk:"life_cycle_state"` + // The reason indicating why the run was queued. + QueueReason types.String `tfsdk:"queue_reason"` + // A value indicating the run's result. This field is only available for + // terminal lifecycle states. + ResultState RunResultState `tfsdk:"result_state"` + // A descriptive message for the current state. This field is unstructured, + // and its exact format is subject to change. 
+ StateMessage types.String `tfsdk:"state_message"` + // A value indicating whether a run was canceled manually by a user or by + // the scheduler because the run timed out. + UserCancelledOrTimedout types.Bool `tfsdk:"user_cancelled_or_timedout"` +} + +// Used when outputting a child run, in GetRun or ListRuns. +type RunTask struct { + // The sequence number of this run attempt for a triggered job run. The + // initial attempt of a run has an attempt_number of 0\. If the initial run + // attempt fails, and the job has a retry policy (`max_retries` \> 0), + // subsequent runs are created with an `original_attempt_run_id` of the + // original attempt’s ID and an incrementing `attempt_number`. Runs are + // retried only until they succeed, and the maximum `attempt_number` is the + // same as the `max_retries` value for the job. + AttemptNumber types.Int64 `tfsdk:"attempt_number"` + // The time in milliseconds it took to terminate the cluster and clean up + // any associated artifacts. The duration of a task run is the sum of the + // `setup_duration`, `execution_duration`, and the `cleanup_duration`. The + // `cleanup_duration` field is set to 0 for multitask job runs. The total + // duration of a multitask job run is the value of the `run_duration` field. + CleanupDuration types.Int64 `tfsdk:"cleanup_duration"` + // The cluster used for this run. If the run is specified to use a new + // cluster, this field is set once the Jobs service has requested a cluster + // for the run. + ClusterInstance *ClusterInstance `tfsdk:"cluster_instance"` + // If condition_task, specifies a condition with an outcome that can be used + // to control the execution of other tasks. Does not require a cluster to + // execute and does not support retries or notifications. + ConditionTask *RunConditionTask `tfsdk:"condition_task"` + // If dbt_task, indicates that this must execute a dbt task. It requires + // both Databricks SQL and the ability to use a serverless or a pro SQL + // warehouse. + DbtTask *DbtTask `tfsdk:"dbt_task"` + // An optional array of objects specifying the dependency graph of the task. + // All tasks specified in this field must complete successfully before + // executing this task. The key is `task_key`, and the value is the name + // assigned to the dependent task. + DependsOn []TaskDependency `tfsdk:"depends_on"` + // An optional description for this task. + Description types.String `tfsdk:"description"` + // An optional set of email addresses notified when the task run begins or + // completes. The default behavior is to not send any emails. + EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications"` + // The time at which this run ended in epoch milliseconds (milliseconds + // since 1/1/1970 UTC). This field is set to 0 if the job is still running. + EndTime types.Int64 `tfsdk:"end_time"` + // The key that references an environment spec in a job. This field is + // required for Python script, Python wheel and dbt tasks when using + // serverless compute. + EnvironmentKey types.String `tfsdk:"environment_key"` + // The time in milliseconds it took to execute the commands in the JAR or + // notebook until they completed, failed, timed out, were cancelled, or + // encountered an unexpected error. The duration of a task run is the sum of + // the `setup_duration`, `execution_duration`, and the `cleanup_duration`. + // The `execution_duration` field is set to 0 for multitask job runs. The + // total duration of a multitask job run is the value of the `run_duration` + // field. 
+ ExecutionDuration types.Int64 `tfsdk:"execution_duration"` + // If existing_cluster_id, the ID of an existing cluster that is used for + // all runs. When running jobs or tasks on an existing cluster, you may need + // to manually restart the cluster if it stops responding. We suggest + // running jobs and tasks on new clusters for greater reliability + ExistingClusterId types.String `tfsdk:"existing_cluster_id"` + // If for_each_task, indicates that this task must execute the nested task + // within it. + ForEachTask *RunForEachTask `tfsdk:"for_each_task"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. If `git_source` is set, + // these tasks retrieve the file from the remote repository by default. + // However, this behavior can be overridden by setting `source` to + // `WORKSPACE` on the task. Note: dbt and SQL File tasks support only + // version-controlled sources. If dbt or SQL File tasks are used, + // `git_source` must be defined on the job. + GitSource *GitSource `tfsdk:"git_source"` + // If job_cluster_key, this task is executed reusing the cluster specified + // in `job.settings.job_clusters`. + JobClusterKey types.String `tfsdk:"job_cluster_key"` + // An optional list of libraries to be installed on the cluster. The default + // value is an empty list. + Libraries []compute.Library `tfsdk:"libraries"` + // If new_cluster, a description of a new cluster that is created for each + // run. + NewCluster *compute.ClusterSpec `tfsdk:"new_cluster"` + // If notebook_task, indicates that this task must run a notebook. This + // field may not be specified in conjunction with spark_jar_task. + NotebookTask *NotebookTask `tfsdk:"notebook_task"` + // Optional notification settings that are used when sending notifications + // to each of the `email_notifications` and `webhook_notifications` for this + // task run. + NotificationSettings *TaskNotificationSettings `tfsdk:"notification_settings"` + // If pipeline_task, indicates that this task must execute a Pipeline. + PipelineTask *PipelineTask `tfsdk:"pipeline_task"` + // If python_wheel_task, indicates that this job must execute a PythonWheel. + PythonWheelTask *PythonWheelTask `tfsdk:"python_wheel_task"` + // The time in milliseconds that the run has spent in the queue. + QueueDuration types.Int64 `tfsdk:"queue_duration"` + // Parameter values including resolved references + ResolvedValues *ResolvedValues `tfsdk:"resolved_values"` + // The time in milliseconds it took the job run and all of its repairs to + // finish. + RunDuration types.Int64 `tfsdk:"run_duration"` + // The ID of the task run. + RunId types.Int64 `tfsdk:"run_id"` + // An optional value indicating the condition that determines whether the + // task should be run once its dependencies have been completed. When + // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of + // possible values. + RunIf RunIf `tfsdk:"run_if"` + // If run_job_task, indicates that this task must execute another job. + RunJobTask *RunJobTask `tfsdk:"run_job_task"` + + RunPageUrl types.String `tfsdk:"run_page_url"` + // The time in milliseconds it took to set up the cluster. For runs that run + // on new clusters this is the cluster creation time, for runs that run on + // existing clusters this time should be very short. 
The duration of a task + // run is the sum of the `setup_duration`, `execution_duration`, and the + // `cleanup_duration`. The `setup_duration` field is set to 0 for multitask + // job runs. The total duration of a multitask job run is the value of the + // `run_duration` field. + SetupDuration types.Int64 `tfsdk:"setup_duration"` + // If spark_jar_task, indicates that this task must run a JAR. + SparkJarTask *SparkJarTask `tfsdk:"spark_jar_task"` + // If spark_python_task, indicates that this task must run a Python file. + SparkPythonTask *SparkPythonTask `tfsdk:"spark_python_task"` + // If `spark_submit_task`, indicates that this task must be launched by the + // spark submit script. This task can run only on new clusters. + // + // In the `new_cluster` specification, `libraries` and `spark_conf` are not + // supported. Instead, use `--jars` and `--py-files` to add Java and Python + // libraries and `--conf` to set the Spark configurations. + // + // `master`, `deploy-mode`, and `executor-cores` are automatically + // configured by Databricks; you _cannot_ specify them in parameters. + // + // By default, the Spark submit job uses all available memory (excluding + // reserved memory for Databricks services). You can set `--driver-memory`, + // and `--executor-memory` to a smaller value to leave some room for + // off-heap usage. + // + // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 + // paths. + SparkSubmitTask *SparkSubmitTask `tfsdk:"spark_submit_task"` + // If sql_task, indicates that this job must execute a SQL task. + SqlTask *SqlTask `tfsdk:"sql_task"` + // The time at which this run was started in epoch milliseconds + // (milliseconds since 1/1/1970 UTC). This may not be the time when the job + // task starts executing, for example, if the job is scheduled to run on a + // new cluster, this is the time the cluster creation call is issued. + StartTime types.Int64 `tfsdk:"start_time"` + // The current state of the run. + State *RunState `tfsdk:"state"` + // A unique name for the task. This field is used to refer to this task from + // other tasks. This field is required and must be unique within its parent + // job. On Update or Reset, this field is used to reference the tasks to be + // updated or reset. + TaskKey types.String `tfsdk:"task_key"` + // An optional timeout applied to each run of this job task. A value of `0` + // means no timeout. + TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds"` + // A collection of system notification IDs to notify when the run begins or + // completes. The default behavior is to not send any system notifications. + // Task webhooks respect the task notification settings. + WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications"` +} + +// The type of a run. * `JOB_RUN`: Normal job run. A run created with +// :method:jobs/runNow. * `WORKFLOW_RUN`: Workflow run. A run created with +// [dbutils.notebook.run]. * `SUBMIT_RUN`: Submit run. A run created with +// :method:jobs/submit. +// +// [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow +type RunType string + +// Normal job run. A run created with :method:jobs/runNow. +const RunTypeJobRun RunType = `JOB_RUN` + +// Submit run. A run created with :method:jobs/submit. +const RunTypeSubmitRun RunType = `SUBMIT_RUN` + +// Workflow run. A run created with [dbutils.notebook.run]. 
+// +// [dbutils.notebook.run]: https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-workflow +const RunTypeWorkflowRun RunType = `WORKFLOW_RUN` + +// String representation for [fmt.Print] +func (f *RunType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunType) Set(v string) error { + switch v { + case `JOB_RUN`, `SUBMIT_RUN`, `WORKFLOW_RUN`: + *f = RunType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "JOB_RUN", "SUBMIT_RUN", "WORKFLOW_RUN"`, v) + } +} + +// Type always returns RunType to satisfy [pflag.Value] interface +func (f *RunType) Type() string { + return "RunType" +} + +// Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file +// will be retrieved\ from the local Databricks workspace. When set to `GIT`, +// the SQL file will be retrieved from a Git repository defined in `git_source`. +// If the value is empty, the task will use `GIT` if `git_source` is defined and +// `WORKSPACE` otherwise. +// +// * `WORKSPACE`: SQL file is located in Databricks workspace. * `GIT`: SQL file +// is located in cloud Git provider. +type Source string + +// SQL file is located in cloud Git provider. +const SourceGit Source = `GIT` + +// SQL file is located in workspace. +const SourceWorkspace Source = `WORKSPACE` + +// String representation for [fmt.Print] +func (f *Source) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Source) Set(v string) error { + switch v { + case `GIT`, `WORKSPACE`: + *f = Source(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GIT", "WORKSPACE"`, v) + } +} + +// Type always returns Source to satisfy [pflag.Value] interface +func (f *Source) Type() string { + return "Source" +} + +type SparkJarTask struct { + // Deprecated since 04/2016. Provide a `jar` through the `libraries` field + // instead. For an example, see :method:jobs/create. + JarUri types.String `tfsdk:"jar_uri"` + // The full name of the class containing the main method to be executed. + // This class must be contained in a JAR provided as a library. + // + // The code must use `SparkContext.getOrCreate` to obtain a Spark context; + // otherwise, runs of the job fail. + MainClassName types.String `tfsdk:"main_class_name"` + // Parameters passed to the main method. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + Parameters []types.String `tfsdk:"parameters"` +} + +type SparkPythonTask struct { + // Command line parameters passed to the Python file. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + Parameters []types.String `tfsdk:"parameters"` + // The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, + // adls:/, gcs:/) and workspace paths are supported. For python files stored + // in the Databricks workspace, the path must be absolute and begin with + // `/`. For files stored in a remote repository, the path must be relative. + // This field is required. + PythonFile types.String `tfsdk:"python_file"` + // Optional location type of the Python file. 
When set to `WORKSPACE` or not + // specified, the file will be retrieved from the local Databricks workspace + // or cloud location (if the `python_file` has a URI format). When set to + // `GIT`, the Python file will be retrieved from a Git repository defined in + // `git_source`. + // + // * `WORKSPACE`: The Python file is located in a Databricks workspace or at + // a cloud filesystem URI. * `GIT`: The Python file is located in a remote + // Git repository. + Source Source `tfsdk:"source"` +} + +type SparkSubmitTask struct { + // Command-line parameters passed to spark submit. + // + // Use [Task parameter variables] to set parameters containing information + // about job runs. + // + // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables + Parameters []types.String `tfsdk:"parameters"` +} + +type SqlAlertOutput struct { + // The state of the SQL alert. + // + // * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not + // fulfill trigger conditions * TRIGGERED: alert evaluated and fulfilled + // trigger conditions + AlertState SqlAlertState `tfsdk:"alert_state"` + // The link to find the output results. + OutputLink types.String `tfsdk:"output_link"` + // The text of the SQL query. Can Run permission of the SQL query associated + // with the SQL alert is required to view this field. + QueryText types.String `tfsdk:"query_text"` + // Information about SQL statements executed in the run. + SqlStatements []SqlStatementOutput `tfsdk:"sql_statements"` + // The canonical identifier of the SQL warehouse. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +// The state of the SQL alert. +// +// * UNKNOWN: alert yet to be evaluated * OK: alert evaluated and did not +// fulfill trigger conditions * TRIGGERED: alert evaluated and fulfilled trigger +// conditions +type SqlAlertState string + +const SqlAlertStateOk SqlAlertState = `OK` + +const SqlAlertStateTriggered SqlAlertState = `TRIGGERED` + +const SqlAlertStateUnknown SqlAlertState = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *SqlAlertState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SqlAlertState) Set(v string) error { + switch v { + case `OK`, `TRIGGERED`, `UNKNOWN`: + *f = SqlAlertState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OK", "TRIGGERED", "UNKNOWN"`, v) + } +} + +// Type always returns SqlAlertState to satisfy [pflag.Value] interface +func (f *SqlAlertState) Type() string { + return "SqlAlertState" +} + +type SqlDashboardOutput struct { + // The canonical identifier of the SQL warehouse. + WarehouseId types.String `tfsdk:"warehouse_id"` + // Widgets executed in the run. Only SQL query based widgets are listed. + Widgets []SqlDashboardWidgetOutput `tfsdk:"widgets"` +} + +type SqlDashboardWidgetOutput struct { + // Time (in epoch milliseconds) when execution of the SQL widget ends. + EndTime types.Int64 `tfsdk:"end_time"` + // The information about the error when execution fails. + Error *SqlOutputError `tfsdk:"error"` + // The link to find the output results. + OutputLink types.String `tfsdk:"output_link"` + // Time (in epoch milliseconds) when execution of the SQL widget starts. + StartTime types.Int64 `tfsdk:"start_time"` + // The execution status of the SQL widget. + Status SqlDashboardWidgetOutputStatus `tfsdk:"status"` + // The canonical identifier of the SQL widget. + WidgetId types.String `tfsdk:"widget_id"` + // The title of the SQL widget. 
+ WidgetTitle types.String `tfsdk:"widget_title"` +} + +type SqlDashboardWidgetOutputStatus string + +const SqlDashboardWidgetOutputStatusCancelled SqlDashboardWidgetOutputStatus = `CANCELLED` + +const SqlDashboardWidgetOutputStatusFailed SqlDashboardWidgetOutputStatus = `FAILED` + +const SqlDashboardWidgetOutputStatusPending SqlDashboardWidgetOutputStatus = `PENDING` + +const SqlDashboardWidgetOutputStatusRunning SqlDashboardWidgetOutputStatus = `RUNNING` + +const SqlDashboardWidgetOutputStatusSuccess SqlDashboardWidgetOutputStatus = `SUCCESS` + +// String representation for [fmt.Print] +func (f *SqlDashboardWidgetOutputStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SqlDashboardWidgetOutputStatus) Set(v string) error { + switch v { + case `CANCELLED`, `FAILED`, `PENDING`, `RUNNING`, `SUCCESS`: + *f = SqlDashboardWidgetOutputStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELLED", "FAILED", "PENDING", "RUNNING", "SUCCESS"`, v) + } +} + +// Type always returns SqlDashboardWidgetOutputStatus to satisfy [pflag.Value] interface +func (f *SqlDashboardWidgetOutputStatus) Type() string { + return "SqlDashboardWidgetOutputStatus" +} + +type SqlOutput struct { + // The output of a SQL alert task, if available. + AlertOutput *SqlAlertOutput `tfsdk:"alert_output"` + // The output of a SQL dashboard task, if available. + DashboardOutput *SqlDashboardOutput `tfsdk:"dashboard_output"` + // The output of a SQL query task, if available. + QueryOutput *SqlQueryOutput `tfsdk:"query_output"` +} + +type SqlOutputError struct { + // The error message when execution fails. + Message types.String `tfsdk:"message"` +} + +type SqlQueryOutput struct { + EndpointId types.String `tfsdk:"endpoint_id"` + // The link to find the output results. + OutputLink types.String `tfsdk:"output_link"` + // The text of the SQL query. Can Run permission of the SQL query is + // required to view this field. + QueryText types.String `tfsdk:"query_text"` + // Information about SQL statements executed in the run. + SqlStatements []SqlStatementOutput `tfsdk:"sql_statements"` + // The canonical identifier of the SQL warehouse. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type SqlStatementOutput struct { + // A key that can be used to look up query details. + LookupKey types.String `tfsdk:"lookup_key"` +} + +type SqlTask struct { + // If alert, indicates that this job must refresh a SQL alert. + Alert *SqlTaskAlert `tfsdk:"alert"` + // If dashboard, indicates that this job must refresh a SQL dashboard. + Dashboard *SqlTaskDashboard `tfsdk:"dashboard"` + // If file, indicates that this job runs a SQL file in a remote Git + // repository. + File *SqlTaskFile `tfsdk:"file"` + // Parameters to be used for each run of this job. The SQL alert task does + // not support custom parameters. + Parameters map[string]types.String `tfsdk:"parameters"` + // If query, indicates that this job must execute a SQL query. + Query *SqlTaskQuery `tfsdk:"query"` + // The canonical identifier of the SQL warehouse. Recommended to use with + // serverless or pro SQL warehouses. Classic SQL warehouses are only + // supported for SQL alert, dashboard and query tasks and are limited to + // scheduled single-task jobs. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type SqlTaskAlert struct { + // The canonical identifier of the SQL alert. 
+ AlertId types.String `tfsdk:"alert_id"` + // If true, the alert notifications are not sent to subscribers. + PauseSubscriptions types.Bool `tfsdk:"pause_subscriptions"` + // If specified, alert notifications are sent to subscribers. + Subscriptions []SqlTaskSubscription `tfsdk:"subscriptions"` +} + +type SqlTaskDashboard struct { + // Subject of the email sent to subscribers of this task. + CustomSubject types.String `tfsdk:"custom_subject"` + // The canonical identifier of the SQL dashboard. + DashboardId types.String `tfsdk:"dashboard_id"` + // If true, the dashboard snapshot is not taken, and emails are not sent to + // subscribers. + PauseSubscriptions types.Bool `tfsdk:"pause_subscriptions"` + // If specified, dashboard snapshots are sent to subscriptions. + Subscriptions []SqlTaskSubscription `tfsdk:"subscriptions"` +} + +type SqlTaskFile struct { + // Path of the SQL file. Must be relative if the source is a remote Git + // repository and absolute for workspace paths. + Path types.String `tfsdk:"path"` + // Optional location type of the SQL file. When set to `WORKSPACE`, the SQL + // file will be retrieved from the local Databricks workspace. When set to + // `GIT`, the SQL file will be retrieved from a Git repository defined in + // `git_source`. If the value is empty, the task will use `GIT` if + // `git_source` is defined and `WORKSPACE` otherwise. + // + // * `WORKSPACE`: SQL file is located in Databricks workspace. * `GIT`: SQL + // file is located in cloud Git provider. + Source Source `tfsdk:"source"` +} + +type SqlTaskQuery struct { + // The canonical identifier of the SQL query. + QueryId types.String `tfsdk:"query_id"` +} + +type SqlTaskSubscription struct { + // The canonical identifier of the destination to receive email + // notification. This parameter is mutually exclusive with user_name. You + // cannot set both destination_id and user_name for subscription + // notifications. + DestinationId types.String `tfsdk:"destination_id"` + // The user name to receive the subscription email. This parameter is + // mutually exclusive with destination_id. You cannot set both + // destination_id and user_name for subscription notifications. + UserName types.String `tfsdk:"user_name"` +} + +type SubmitRun struct { + // List of permissions to set on the job. + AccessControlList []iam.AccessControlRequest `tfsdk:"access_control_list"` + // An optional set of email addresses notified when the run begins or + // completes. + EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications"` + // A list of task execution environment specifications that can be + // referenced by tasks of this run. + Environments []JobEnvironment `tfsdk:"environments"` + // An optional specification for a remote Git repository containing the + // source code used by tasks. Version-controlled source code is supported by + // notebook, dbt, Python script, and SQL File tasks. + // + // If `git_source` is set, these tasks retrieve the file from the remote + // repository by default. However, this behavior can be overridden by + // setting `source` to `WORKSPACE` on the task. + // + // Note: dbt and SQL File tasks support only version-controlled sources. If + // dbt or SQL File tasks are used, `git_source` must be defined on the job. + GitSource *GitSource `tfsdk:"git_source"` + // An optional set of health rules that can be defined for this job. + Health *JobsHealthRules `tfsdk:"health"` + // An optional token that can be used to guarantee the idempotency of job + // run requests. 
If a run with the provided token already exists, the + // request does not create a new run but returns the ID of the existing run + // instead. If a run with the provided token is deleted, an error is + // returned. + // + // If you specify the idempotency token, upon failure you can retry until + // the request succeeds. Databricks guarantees that exactly one run is + // launched with that idempotency token. + // + // This token must have at most 64 characters. + // + // For more information, see [How to ensure idempotency for jobs]. + // + // [How to ensure idempotency for jobs]: https://kb.databricks.com/jobs/jobs-idempotency.html + IdempotencyToken types.String `tfsdk:"idempotency_token"` + // Optional notification settings that are used when sending notifications + // to each of the `email_notifications` and `webhook_notifications` for this + // run. + NotificationSettings *JobNotificationSettings `tfsdk:"notification_settings"` + // The queue settings of the one-time run. + Queue *QueueSettings `tfsdk:"queue"` + // Specifies the user or service principal that the job runs as. If not + // specified, the job runs as the user who submits the request. + RunAs *JobRunAs `tfsdk:"run_as"` + // An optional name for the run. The default value is `Untitled`. + RunName types.String `tfsdk:"run_name"` + + Tasks []SubmitTask `tfsdk:"tasks"` + // An optional timeout applied to each run of this job. A value of `0` means + // no timeout. + TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds"` + // A collection of system notification IDs to notify when the run begins or + // completes. + WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications"` +} + +// Run was created and started successfully. +type SubmitRunResponse struct { + // The canonical identifier for the newly submitted run. + RunId types.Int64 `tfsdk:"run_id"` +} + +type SubmitTask struct { + // If condition_task, specifies a condition with an outcome that can be used + // to control the execution of other tasks. Does not require a cluster to + // execute and does not support retries or notifications. + ConditionTask *ConditionTask `tfsdk:"condition_task"` + // If dbt_task, indicates that this must execute a dbt task. It requires + // both Databricks SQL and the ability to use a serverless or a pro SQL + // warehouse. + DbtTask *DbtTask `tfsdk:"dbt_task"` + // An optional array of objects specifying the dependency graph of the task. + // All tasks specified in this field must complete successfully before + // executing this task. The key is `task_key`, and the value is the name + // assigned to the dependent task. + DependsOn []TaskDependency `tfsdk:"depends_on"` + // An optional description for this task. + Description types.String `tfsdk:"description"` + // An optional set of email addresses notified when the task run begins or + // completes. The default behavior is to not send any emails. + EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications"` + // The key that references an environment spec in a job. This field is + // required for Python script, Python wheel and dbt tasks when using + // serverless compute. + EnvironmentKey types.String `tfsdk:"environment_key"` + // If existing_cluster_id, the ID of an existing cluster that is used for + // all runs. When running jobs or tasks on an existing cluster, you may need + // to manually restart the cluster if it stops responding. 
We suggest + // running jobs and tasks on new clusters for greater reliability + ExistingClusterId types.String `tfsdk:"existing_cluster_id"` + // If for_each_task, indicates that this task must execute the nested task + // within it. + ForEachTask *ForEachTask `tfsdk:"for_each_task"` + // An optional set of health rules that can be defined for this job. + Health *JobsHealthRules `tfsdk:"health"` + // An optional list of libraries to be installed on the cluster. The default + // value is an empty list. + Libraries []compute.Library `tfsdk:"libraries"` + // If new_cluster, a description of a new cluster that is created for each + // run. + NewCluster *compute.ClusterSpec `tfsdk:"new_cluster"` + // If notebook_task, indicates that this task must run a notebook. This + // field may not be specified in conjunction with spark_jar_task. + NotebookTask *NotebookTask `tfsdk:"notebook_task"` + // Optional notification settings that are used when sending notifications + // to each of the `email_notifications` and `webhook_notifications` for this + // task run. + NotificationSettings *TaskNotificationSettings `tfsdk:"notification_settings"` + // If pipeline_task, indicates that this task must execute a Pipeline. + PipelineTask *PipelineTask `tfsdk:"pipeline_task"` + // If python_wheel_task, indicates that this job must execute a PythonWheel. + PythonWheelTask *PythonWheelTask `tfsdk:"python_wheel_task"` + // An optional value indicating the condition that determines whether the + // task should be run once its dependencies have been completed. When + // omitted, defaults to `ALL_SUCCESS`. See :method:jobs/create for a list of + // possible values. + RunIf RunIf `tfsdk:"run_if"` + // If run_job_task, indicates that this task must execute another job. + RunJobTask *RunJobTask `tfsdk:"run_job_task"` + // If spark_jar_task, indicates that this task must run a JAR. + SparkJarTask *SparkJarTask `tfsdk:"spark_jar_task"` + // If spark_python_task, indicates that this task must run a Python file. + SparkPythonTask *SparkPythonTask `tfsdk:"spark_python_task"` + // If `spark_submit_task`, indicates that this task must be launched by the + // spark submit script. This task can run only on new clusters. + // + // In the `new_cluster` specification, `libraries` and `spark_conf` are not + // supported. Instead, use `--jars` and `--py-files` to add Java and Python + // libraries and `--conf` to set the Spark configurations. + // + // `master`, `deploy-mode`, and `executor-cores` are automatically + // configured by Databricks; you _cannot_ specify them in parameters. + // + // By default, the Spark submit job uses all available memory (excluding + // reserved memory for Databricks services). You can set `--driver-memory`, + // and `--executor-memory` to a smaller value to leave some room for + // off-heap usage. + // + // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 + // paths. + SparkSubmitTask *SparkSubmitTask `tfsdk:"spark_submit_task"` + // If sql_task, indicates that this job must execute a SQL task. + SqlTask *SqlTask `tfsdk:"sql_task"` + // A unique name for the task. This field is used to refer to this task from + // other tasks. This field is required and must be unique within its parent + // job. On Update or Reset, this field is used to reference the tasks to be + // updated or reset. + TaskKey types.String `tfsdk:"task_key"` + // An optional timeout applied to each run of this job task. A value of `0` + // means no timeout. 
+ TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds"` + // A collection of system notification IDs to notify when the run begins or + // completes. The default behavior is to not send any system notifications. + // Task webhooks respect the task notification settings. + WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications"` +} + +type TableUpdateTriggerConfiguration struct { + // The table(s) condition based on which to trigger a job run. + Condition Condition `tfsdk:"condition"` + // If set, the trigger starts a run only after the specified amount of time + // has passed since the last time the trigger fired. The minimum allowed + // value is 60 seconds. + MinTimeBetweenTriggersSeconds types.Int64 `tfsdk:"min_time_between_triggers_seconds"` + // A list of Delta tables to monitor for changes. The table name must be in + // the format `catalog_name.schema_name.table_name`. + TableNames []types.String `tfsdk:"table_names"` + // If set, the trigger starts a run only after no table updates have + // occurred for the specified time and can be used to wait for a series of + // table updates before triggering a run. The minimum allowed value is 60 + // seconds. + WaitAfterLastChangeSeconds types.Int64 `tfsdk:"wait_after_last_change_seconds"` +} + +type Task struct { + // If condition_task, specifies a condition with an outcome that can be used + // to control the execution of other tasks. Does not require a cluster to + // execute and does not support retries or notifications. + ConditionTask *ConditionTask `tfsdk:"condition_task"` + // If dbt_task, indicates that this must execute a dbt task. It requires + // both Databricks SQL and the ability to use a serverless or a pro SQL + // warehouse. + DbtTask *DbtTask `tfsdk:"dbt_task"` + // An optional array of objects specifying the dependency graph of the task. + // All tasks specified in this field must complete before executing this + // task. The task will run only if the `run_if` condition is true. The key + // is `task_key`, and the value is the name assigned to the dependent task. + DependsOn []TaskDependency `tfsdk:"depends_on"` + // An optional description for this task. + Description types.String `tfsdk:"description"` + // An option to disable auto optimization in serverless + DisableAutoOptimization types.Bool `tfsdk:"disable_auto_optimization"` + // An optional set of email addresses that is notified when runs of this + // task begin or complete as well as when this task is deleted. The default + // behavior is to not send any emails. + EmailNotifications *TaskEmailNotifications `tfsdk:"email_notifications"` + // The key that references an environment spec in a job. This field is + // required for Python script, Python wheel and dbt tasks when using + // serverless compute. + EnvironmentKey types.String `tfsdk:"environment_key"` + // If existing_cluster_id, the ID of an existing cluster that is used for + // all runs. When running jobs or tasks on an existing cluster, you may need + // to manually restart the cluster if it stops responding. We suggest + // running jobs and tasks on new clusters for greater reliability + ExistingClusterId types.String `tfsdk:"existing_cluster_id"` + // If for_each_task, indicates that this task must execute the nested task + // within it. + ForEachTask *ForEachTask `tfsdk:"for_each_task"` + // An optional set of health rules that can be defined for this job. 
+ Health *JobsHealthRules `tfsdk:"health"`
+ // If job_cluster_key, this task is executed reusing the cluster specified
+ // in `job.settings.job_clusters`.
+ JobClusterKey types.String `tfsdk:"job_cluster_key"`
+ // An optional list of libraries to be installed on the cluster. The default
+ // value is an empty list.
+ Libraries []compute.Library `tfsdk:"libraries"`
+ // An optional maximum number of times to retry an unsuccessful run. A run
+ // is considered to be unsuccessful if it completes with the `FAILED`
+ // result_state or `INTERNAL_ERROR` `life_cycle_state`. The value `-1` means
+ // to retry indefinitely and the value `0` means to never retry.
+ MaxRetries types.Int64 `tfsdk:"max_retries"`
+ // An optional minimal interval in milliseconds between the start of the
+ // failed run and the subsequent retry run. The default behavior is that
+ // unsuccessful runs are immediately retried.
+ MinRetryIntervalMillis types.Int64 `tfsdk:"min_retry_interval_millis"`
+ // If new_cluster, a description of a new cluster that is created for each
+ // run.
+ NewCluster *compute.ClusterSpec `tfsdk:"new_cluster"`
+ // If notebook_task, indicates that this task must run a notebook. This
+ // field may not be specified in conjunction with spark_jar_task.
+ NotebookTask *NotebookTask `tfsdk:"notebook_task"`
+ // Optional notification settings that are used when sending notifications
+ // to each of the `email_notifications` and `webhook_notifications` for this
+ // task.
+ NotificationSettings *TaskNotificationSettings `tfsdk:"notification_settings"`
+ // If pipeline_task, indicates that this task must execute a Pipeline.
+ PipelineTask *PipelineTask `tfsdk:"pipeline_task"`
+ // If python_wheel_task, indicates that this job must execute a PythonWheel.
+ PythonWheelTask *PythonWheelTask `tfsdk:"python_wheel_task"`
+ // An optional policy to specify whether to retry a job when it times out.
+ // The default behavior is to not retry on timeout.
+ RetryOnTimeout types.Bool `tfsdk:"retry_on_timeout"`
+ // An optional value specifying the condition determining whether the task
+ // is run once its dependencies have been completed.
+ //
+ // * `ALL_SUCCESS`: All dependencies have executed and succeeded *
+ // `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded *
+ // `NONE_FAILED`: None of the dependencies have failed and at least one was
+ // executed * `ALL_DONE`: All dependencies have been completed *
+ // `AT_LEAST_ONE_FAILED`: At least one dependency failed * `ALL_FAILED`: All
+ // dependencies have failed
+ RunIf RunIf `tfsdk:"run_if"`
+ // If run_job_task, indicates that this task must execute another job.
+ RunJobTask *RunJobTask `tfsdk:"run_job_task"`
+ // If spark_jar_task, indicates that this task must run a JAR.
+ SparkJarTask *SparkJarTask `tfsdk:"spark_jar_task"`
+ // If spark_python_task, indicates that this task must run a Python file.
+ SparkPythonTask *SparkPythonTask `tfsdk:"spark_python_task"`
+ // If `spark_submit_task`, indicates that this task must be launched by the
+ // spark submit script. This task can run only on new clusters.
+ //
+ // In the `new_cluster` specification, `libraries` and `spark_conf` are not
+ // supported. Instead, use `--jars` and `--py-files` to add Java and Python
+ // libraries and `--conf` to set the Spark configurations.
+ //
+ // `master`, `deploy-mode`, and `executor-cores` are automatically
+ // configured by Databricks; you _cannot_ specify them in parameters.
+ // + // By default, the Spark submit job uses all available memory (excluding + // reserved memory for Databricks services). You can set `--driver-memory`, + // and `--executor-memory` to a smaller value to leave some room for + // off-heap usage. + // + // The `--jars`, `--py-files`, `--files` arguments support DBFS and S3 + // paths. + SparkSubmitTask *SparkSubmitTask `tfsdk:"spark_submit_task"` + // If sql_task, indicates that this job must execute a SQL task. + SqlTask *SqlTask `tfsdk:"sql_task"` + // A unique name for the task. This field is used to refer to this task from + // other tasks. This field is required and must be unique within its parent + // job. On Update or Reset, this field is used to reference the tasks to be + // updated or reset. + TaskKey types.String `tfsdk:"task_key"` + // An optional timeout applied to each run of this job task. A value of `0` + // means no timeout. + TimeoutSeconds types.Int64 `tfsdk:"timeout_seconds"` + // A collection of system notification IDs to notify when runs of this task + // begin or complete. The default behavior is to not send any system + // notifications. + WebhookNotifications *WebhookNotifications `tfsdk:"webhook_notifications"` +} + +type TaskDependency struct { + // Can only be specified on condition task dependencies. The outcome of the + // dependent task that must be met for this task to run. + Outcome types.String `tfsdk:"outcome"` + // The name of the task this task depends on. + TaskKey types.String `tfsdk:"task_key"` +} + +type TaskEmailNotifications struct { + // If true, do not send email to recipients specified in `on_failure` if the + // run is skipped. + NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs"` + // A list of email addresses to be notified when the duration of a run + // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in + // the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is + // specified in the `health` field for the job, notifications are not sent. + OnDurationWarningThresholdExceeded []types.String `tfsdk:"on_duration_warning_threshold_exceeded"` + // A list of email addresses to be notified when a run unsuccessfully + // completes. A run is considered to have completed unsuccessfully if it + // ends with an `INTERNAL_ERROR` `life_cycle_state` or a `FAILED`, or + // `TIMED_OUT` result_state. If this is not specified on job creation, + // reset, or update the list is empty, and notifications are not sent. + OnFailure []types.String `tfsdk:"on_failure"` + // A list of email addresses to be notified when a run begins. If not + // specified on job creation, reset, or update, the list is empty, and + // notifications are not sent. + OnStart []types.String `tfsdk:"on_start"` + // A list of email addresses to notify when any streaming backlog thresholds + // are exceeded for any stream. Streaming backlog thresholds can be set in + // the `health` field using the following metrics: + // `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, + // `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is + // based on the 10-minute average of these metrics. If the issue persists, + // notifications are resent every 30 minutes. + OnStreamingBacklogExceeded []types.String `tfsdk:"on_streaming_backlog_exceeded"` + // A list of email addresses to be notified when a run successfully + // completes. A run is considered to have completed successfully if it ends + // with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. 
If
+ // not specified on job creation, reset, or update, the list is empty, and
+ // notifications are not sent.
+ OnSuccess []types.String `tfsdk:"on_success"`
+}
+
+type TaskNotificationSettings struct {
+ // If true, do not send notifications to recipients specified in `on_start`
+ // for the retried runs and do not send notifications to recipients
+ // specified in `on_failure` until the last retry of the run.
+ AlertOnLastAttempt types.Bool `tfsdk:"alert_on_last_attempt"`
+ // If true, do not send notifications to recipients specified in
+ // `on_failure` if the run is canceled.
+ NoAlertForCanceledRuns types.Bool `tfsdk:"no_alert_for_canceled_runs"`
+ // If true, do not send notifications to recipients specified in
+ // `on_failure` if the run is skipped.
+ NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs"`
+}
+
+// Additional details about what triggered the run
+type TriggerInfo struct {
+ // The run id of the Run Job task run
+ RunId types.Int64 `tfsdk:"run_id"`
+}
+
+type TriggerSettings struct {
+ // File arrival trigger settings.
+ FileArrival *FileArrivalTriggerConfiguration `tfsdk:"file_arrival"`
+ // Whether this trigger is paused or not.
+ PauseStatus PauseStatus `tfsdk:"pause_status"`
+ // Periodic trigger settings.
+ Periodic *PeriodicTriggerConfiguration `tfsdk:"periodic"`
+ // Old table trigger settings name. Deprecated in favor of `table_update`.
+ Table *TableUpdateTriggerConfiguration `tfsdk:"table"`
+
+ TableUpdate *TableUpdateTriggerConfiguration `tfsdk:"table_update"`
+}
+
+// The type of trigger that fired this run.
+//
+// * `PERIODIC`: Schedules that periodically trigger runs, such as a cron
+// scheduler. * `ONE_TIME`: One time triggers that fire a single run. This
+// occurs when you trigger a single run on demand through the UI or the API. *
+// `RETRY`: Indicates a run that is triggered as a retry of a previously failed
+// run. This occurs when you request to re-run the job in case of failures. *
+// `RUN_JOB_TASK`: Indicates a run that is triggered using a Run Job task. *
+// `FILE_ARRIVAL`: Indicates a run that is triggered by a file arrival. *
+// `TABLE`: Indicates a run that is triggered by a table update.
+type TriggerType string
+
+// Indicates a run that is triggered by a file arrival.
+const TriggerTypeFileArrival TriggerType = `FILE_ARRIVAL`
+
+// One time triggers that fire a single run. This occurs when you trigger a
+// single run on demand through the UI or the API.
+const TriggerTypeOneTime TriggerType = `ONE_TIME`
+
+// Schedules that periodically trigger runs, such as a cron scheduler.
+const TriggerTypePeriodic TriggerType = `PERIODIC`
+
+// Indicates a run that is triggered as a retry of a previously failed run. This
+// occurs when you request to re-run the job in case of failures.
+const TriggerTypeRetry TriggerType = `RETRY`
+
+// Indicates a run that is triggered using a Run Job task.
+const TriggerTypeRunJobTask TriggerType = `RUN_JOB_TASK`
+
+// Indicates a run that is triggered by a table update.
+const TriggerTypeTable TriggerType = `TABLE` + +// String representation for [fmt.Print] +func (f *TriggerType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TriggerType) Set(v string) error { + switch v { + case `FILE_ARRIVAL`, `ONE_TIME`, `PERIODIC`, `RETRY`, `RUN_JOB_TASK`, `TABLE`: + *f = TriggerType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FILE_ARRIVAL", "ONE_TIME", "PERIODIC", "RETRY", "RUN_JOB_TASK", "TABLE"`, v) + } +} + +// Type always returns TriggerType to satisfy [pflag.Value] interface +func (f *TriggerType) Type() string { + return "TriggerType" +} + +type UpdateJob struct { + // Remove top-level fields in the job settings. Removing nested fields is + // not supported, except for tasks and job clusters (`tasks/task_1`). This + // field is optional. + FieldsToRemove []types.String `tfsdk:"fields_to_remove"` + // The canonical identifier of the job to update. This field is required. + JobId types.Int64 `tfsdk:"job_id"` + // The new settings for the job. + // + // Top-level fields specified in `new_settings` are completely replaced, + // except for arrays which are merged. That is, new and existing entries are + // completely replaced based on the respective key fields, i.e. `task_key` + // or `job_cluster_key`, while previous entries are kept. + // + // Partially updating nested fields is not supported. + // + // Changes to the field `JobSettings.timeout_seconds` are applied to active + // runs. Changes to other fields are applied to future runs only. + NewSettings *JobSettings `tfsdk:"new_settings"` +} + +type UpdateResponse struct { +} + +type ViewItem struct { + // Content of the view. + Content types.String `tfsdk:"content"` + // Name of the view item. In the case of code view, it would be the + // notebook’s name. In the case of dashboard view, it would be the + // dashboard’s name. + Name types.String `tfsdk:"name"` + // Type of the view item. + Type ViewType `tfsdk:"type"` +} + +// * `NOTEBOOK`: Notebook view item. * `DASHBOARD`: Dashboard view item. +type ViewType string + +// Dashboard view item. +const ViewTypeDashboard ViewType = `DASHBOARD` + +// Notebook view item. +const ViewTypeNotebook ViewType = `NOTEBOOK` + +// String representation for [fmt.Print] +func (f *ViewType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ViewType) Set(v string) error { + switch v { + case `DASHBOARD`, `NOTEBOOK`: + *f = ViewType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DASHBOARD", "NOTEBOOK"`, v) + } +} + +// Type always returns ViewType to satisfy [pflag.Value] interface +func (f *ViewType) Type() string { + return "ViewType" +} + +// * `CODE`: Code view of the notebook. * `DASHBOARDS`: All dashboard views of +// the notebook. * `ALL`: All views of the notebook. +type ViewsToExport string + +// All views of the notebook. +const ViewsToExportAll ViewsToExport = `ALL` + +// Code view of the notebook. +const ViewsToExportCode ViewsToExport = `CODE` + +// All dashboard views of the notebook. 
+const ViewsToExportDashboards ViewsToExport = `DASHBOARDS` + +// String representation for [fmt.Print] +func (f *ViewsToExport) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ViewsToExport) Set(v string) error { + switch v { + case `ALL`, `CODE`, `DASHBOARDS`: + *f = ViewsToExport(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALL", "CODE", "DASHBOARDS"`, v) + } +} + +// Type always returns ViewsToExport to satisfy [pflag.Value] interface +func (f *ViewsToExport) Type() string { + return "ViewsToExport" +} + +type Webhook struct { + Id types.String `tfsdk:"id"` +} + +type WebhookNotifications struct { + // An optional list of system notification IDs to call when the duration of + // a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` + // metric in the `health` field. A maximum of 3 destinations can be + // specified for the `on_duration_warning_threshold_exceeded` property. + OnDurationWarningThresholdExceeded []Webhook `tfsdk:"on_duration_warning_threshold_exceeded"` + // An optional list of system notification IDs to call when the run fails. A + // maximum of 3 destinations can be specified for the `on_failure` property. + OnFailure []Webhook `tfsdk:"on_failure"` + // An optional list of system notification IDs to call when the run starts. + // A maximum of 3 destinations can be specified for the `on_start` property. + OnStart []Webhook `tfsdk:"on_start"` + // An optional list of system notification IDs to call when any streaming + // backlog thresholds are exceeded for any stream. Streaming backlog + // thresholds can be set in the `health` field using the following metrics: + // `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, + // `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`. Alerting is + // based on the 10-minute average of these metrics. If the issue persists, + // notifications are resent every 30 minutes. A maximum of 3 destinations + // can be specified for the `on_streaming_backlog_exceeded` property. + OnStreamingBacklogExceeded []Webhook `tfsdk:"on_streaming_backlog_exceeded"` + // An optional list of system notification IDs to call when the run + // completes successfully. A maximum of 3 destinations can be specified for + // the `on_success` property. + OnSuccess []Webhook `tfsdk:"on_success"` +} diff --git a/service/marketplace_tf/model.go b/service/marketplace_tf/model.go new file mode 100755 index 0000000000..88b6af4461 --- /dev/null +++ b/service/marketplace_tf/model.go @@ -0,0 +1,1619 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. 
+*/ + +package marketplace_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AddExchangeForListingRequest struct { + ExchangeId types.String `tfsdk:"exchange_id"` + + ListingId types.String `tfsdk:"listing_id"` +} + +type AddExchangeForListingResponse struct { + ExchangeForListing *ExchangeListing `tfsdk:"exchange_for_listing"` +} + +type AssetType string + +const AssetTypeAssetTypeDataTable AssetType = `ASSET_TYPE_DATA_TABLE` + +const AssetTypeAssetTypeGitRepo AssetType = `ASSET_TYPE_GIT_REPO` + +const AssetTypeAssetTypeMedia AssetType = `ASSET_TYPE_MEDIA` + +const AssetTypeAssetTypeModel AssetType = `ASSET_TYPE_MODEL` + +const AssetTypeAssetTypeNotebook AssetType = `ASSET_TYPE_NOTEBOOK` + +const AssetTypeAssetTypeUnspecified AssetType = `ASSET_TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *AssetType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AssetType) Set(v string) error { + switch v { + case `ASSET_TYPE_DATA_TABLE`, `ASSET_TYPE_GIT_REPO`, `ASSET_TYPE_MEDIA`, `ASSET_TYPE_MODEL`, `ASSET_TYPE_NOTEBOOK`, `ASSET_TYPE_UNSPECIFIED`: + *f = AssetType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ASSET_TYPE_DATA_TABLE", "ASSET_TYPE_GIT_REPO", "ASSET_TYPE_MEDIA", "ASSET_TYPE_MODEL", "ASSET_TYPE_NOTEBOOK", "ASSET_TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns AssetType to satisfy [pflag.Value] interface +func (f *AssetType) Type() string { + return "AssetType" +} + +// Get one batch of listings. One may specify up to 50 IDs per request. +type BatchGetListingsRequest struct { + Ids []types.String `tfsdk:"-" url:"ids,omitempty"` +} + +type BatchGetListingsResponse struct { + Listings []Listing `tfsdk:"listings"` +} + +// Get one batch of providers. One may specify up to 50 IDs per request. 
+type BatchGetProvidersRequest struct { + Ids []types.String `tfsdk:"-" url:"ids,omitempty"` +} + +type BatchGetProvidersResponse struct { + Providers []ProviderInfo `tfsdk:"providers"` +} + +type Category string + +const CategoryAdvertisingAndMarketing Category = `ADVERTISING_AND_MARKETING` + +const CategoryClimateAndEnvironment Category = `CLIMATE_AND_ENVIRONMENT` + +const CategoryCommerce Category = `COMMERCE` + +const CategoryDemographics Category = `DEMOGRAPHICS` + +const CategoryEconomics Category = `ECONOMICS` + +const CategoryEducation Category = `EDUCATION` + +const CategoryEnergy Category = `ENERGY` + +const CategoryFinancial Category = `FINANCIAL` + +const CategoryGaming Category = `GAMING` + +const CategoryGeospatial Category = `GEOSPATIAL` + +const CategoryHealth Category = `HEALTH` + +const CategoryLookupTables Category = `LOOKUP_TABLES` + +const CategoryManufacturing Category = `MANUFACTURING` + +const CategoryMedia Category = `MEDIA` + +const CategoryOther Category = `OTHER` + +const CategoryPublicSector Category = `PUBLIC_SECTOR` + +const CategoryRetail Category = `RETAIL` + +const CategoryScienceAndResearch Category = `SCIENCE_AND_RESEARCH` + +const CategorySecurity Category = `SECURITY` + +const CategorySports Category = `SPORTS` + +const CategoryTransportationAndLogistics Category = `TRANSPORTATION_AND_LOGISTICS` + +const CategoryTravelAndTourism Category = `TRAVEL_AND_TOURISM` + +// String representation for [fmt.Print] +func (f *Category) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Category) Set(v string) error { + switch v { + case `ADVERTISING_AND_MARKETING`, `CLIMATE_AND_ENVIRONMENT`, `COMMERCE`, `DEMOGRAPHICS`, `ECONOMICS`, `EDUCATION`, `ENERGY`, `FINANCIAL`, `GAMING`, `GEOSPATIAL`, `HEALTH`, `LOOKUP_TABLES`, `MANUFACTURING`, `MEDIA`, `OTHER`, `PUBLIC_SECTOR`, `RETAIL`, `SCIENCE_AND_RESEARCH`, `SECURITY`, `SPORTS`, `TRANSPORTATION_AND_LOGISTICS`, `TRAVEL_AND_TOURISM`: + *f = Category(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ADVERTISING_AND_MARKETING", "CLIMATE_AND_ENVIRONMENT", "COMMERCE", "DEMOGRAPHICS", "ECONOMICS", "EDUCATION", "ENERGY", "FINANCIAL", "GAMING", "GEOSPATIAL", "HEALTH", "LOOKUP_TABLES", "MANUFACTURING", "MEDIA", "OTHER", "PUBLIC_SECTOR", "RETAIL", "SCIENCE_AND_RESEARCH", "SECURITY", "SPORTS", "TRANSPORTATION_AND_LOGISTICS", "TRAVEL_AND_TOURISM"`, v) + } +} + +// Type always returns Category to satisfy [pflag.Value] interface +func (f *Category) Type() string { + return "Category" +} + +type ConsumerTerms struct { + Version types.String `tfsdk:"version"` +} + +// contact info for the consumer requesting data or performing a listing +// installation +type ContactInfo struct { + Company types.String `tfsdk:"company"` + + Email types.String `tfsdk:"email"` + + FirstName types.String `tfsdk:"first_name"` + + LastName types.String `tfsdk:"last_name"` +} + +type Cost string + +const CostFree Cost = `FREE` + +const CostPaid Cost = `PAID` + +// String representation for [fmt.Print] +func (f *Cost) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Cost) Set(v string) error { + switch v { + case `FREE`, `PAID`: + *f = Cost(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FREE", "PAID"`, v) + } +} + +// Type always returns Cost to satisfy [pflag.Value] interface +func (f *Cost) Type() string { + return "Cost" +} + +type CreateExchangeFilterRequest struct { + Filter 
ExchangeFilter `tfsdk:"filter"` +} + +type CreateExchangeFilterResponse struct { + FilterId types.String `tfsdk:"filter_id"` +} + +type CreateExchangeRequest struct { + Exchange Exchange `tfsdk:"exchange"` +} + +type CreateExchangeResponse struct { + ExchangeId types.String `tfsdk:"exchange_id"` +} + +type CreateFileRequest struct { + DisplayName types.String `tfsdk:"display_name"` + + FileParent FileParent `tfsdk:"file_parent"` + + MarketplaceFileType MarketplaceFileType `tfsdk:"marketplace_file_type"` + + MimeType types.String `tfsdk:"mime_type"` +} + +type CreateFileResponse struct { + FileInfo *FileInfo `tfsdk:"file_info"` + // Pre-signed POST URL to blob storage + UploadUrl types.String `tfsdk:"upload_url"` +} + +type CreateInstallationRequest struct { + AcceptedConsumerTerms *ConsumerTerms `tfsdk:"accepted_consumer_terms"` + + CatalogName types.String `tfsdk:"catalog_name"` + + ListingId types.String `tfsdk:"-" url:"-"` + + RecipientType DeltaSharingRecipientType `tfsdk:"recipient_type"` + // for git repo installations + RepoDetail *RepoInstallation `tfsdk:"repo_detail"` + + ShareName types.String `tfsdk:"share_name"` +} + +type CreateListingRequest struct { + Listing Listing `tfsdk:"listing"` +} + +type CreateListingResponse struct { + ListingId types.String `tfsdk:"listing_id"` +} + +// Data request messages also creates a lead (maybe) +type CreatePersonalizationRequest struct { + AcceptedConsumerTerms ConsumerTerms `tfsdk:"accepted_consumer_terms"` + + Comment types.String `tfsdk:"comment"` + + Company types.String `tfsdk:"company"` + + FirstName types.String `tfsdk:"first_name"` + + IntendedUse types.String `tfsdk:"intended_use"` + + IsFromLighthouse types.Bool `tfsdk:"is_from_lighthouse"` + + LastName types.String `tfsdk:"last_name"` + + ListingId types.String `tfsdk:"-" url:"-"` + + RecipientType DeltaSharingRecipientType `tfsdk:"recipient_type"` +} + +type CreatePersonalizationRequestResponse struct { + Id types.String `tfsdk:"id"` +} + +type CreateProviderRequest struct { + Provider ProviderInfo `tfsdk:"provider"` +} + +type CreateProviderResponse struct { + Id types.String `tfsdk:"id"` +} + +type DataRefresh string + +const DataRefreshDaily DataRefresh = `DAILY` + +const DataRefreshHourly DataRefresh = `HOURLY` + +const DataRefreshMinute DataRefresh = `MINUTE` + +const DataRefreshMonthly DataRefresh = `MONTHLY` + +const DataRefreshNone DataRefresh = `NONE` + +const DataRefreshQuarterly DataRefresh = `QUARTERLY` + +const DataRefreshSecond DataRefresh = `SECOND` + +const DataRefreshWeekly DataRefresh = `WEEKLY` + +const DataRefreshYearly DataRefresh = `YEARLY` + +// String representation for [fmt.Print] +func (f *DataRefresh) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DataRefresh) Set(v string) error { + switch v { + case `DAILY`, `HOURLY`, `MINUTE`, `MONTHLY`, `NONE`, `QUARTERLY`, `SECOND`, `WEEKLY`, `YEARLY`: + *f = DataRefresh(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DAILY", "HOURLY", "MINUTE", "MONTHLY", "NONE", "QUARTERLY", "SECOND", "WEEKLY", "YEARLY"`, v) + } +} + +// Type always returns DataRefresh to satisfy [pflag.Value] interface +func (f *DataRefresh) Type() string { + return "DataRefresh" +} + +type DataRefreshInfo struct { + Interval types.Int64 `tfsdk:"interval"` + + Unit DataRefresh `tfsdk:"unit"` +} + +// Delete an exchange filter +type DeleteExchangeFilterRequest struct { + Id types.String `tfsdk:"-" url:"-"` +} + +type DeleteExchangeFilterResponse 
struct { +} + +// Delete an exchange +type DeleteExchangeRequest struct { + Id types.String `tfsdk:"-" url:"-"` +} + +type DeleteExchangeResponse struct { +} + +// Delete a file +type DeleteFileRequest struct { + FileId types.String `tfsdk:"-" url:"-"` +} + +type DeleteFileResponse struct { +} + +// Uninstall from a listing +type DeleteInstallationRequest struct { + InstallationId types.String `tfsdk:"-" url:"-"` + + ListingId types.String `tfsdk:"-" url:"-"` +} + +type DeleteInstallationResponse struct { +} + +// Delete a listing +type DeleteListingRequest struct { + Id types.String `tfsdk:"-" url:"-"` +} + +type DeleteListingResponse struct { +} + +// Delete provider +type DeleteProviderRequest struct { + Id types.String `tfsdk:"-" url:"-"` +} + +type DeleteProviderResponse struct { +} + +type DeltaSharingRecipientType string + +const DeltaSharingRecipientTypeDeltaSharingRecipientTypeDatabricks DeltaSharingRecipientType = `DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS` + +const DeltaSharingRecipientTypeDeltaSharingRecipientTypeOpen DeltaSharingRecipientType = `DELTA_SHARING_RECIPIENT_TYPE_OPEN` + +// String representation for [fmt.Print] +func (f *DeltaSharingRecipientType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeltaSharingRecipientType) Set(v string) error { + switch v { + case `DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS`, `DELTA_SHARING_RECIPIENT_TYPE_OPEN`: + *f = DeltaSharingRecipientType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS", "DELTA_SHARING_RECIPIENT_TYPE_OPEN"`, v) + } +} + +// Type always returns DeltaSharingRecipientType to satisfy [pflag.Value] interface +func (f *DeltaSharingRecipientType) Type() string { + return "DeltaSharingRecipientType" +} + +type Exchange struct { + Comment types.String `tfsdk:"comment"` + + CreatedAt types.Int64 `tfsdk:"created_at"` + + CreatedBy types.String `tfsdk:"created_by"` + + Filters []ExchangeFilter `tfsdk:"filters"` + + Id types.String `tfsdk:"id"` + + LinkedListings []ExchangeListing `tfsdk:"linked_listings"` + + Name types.String `tfsdk:"name"` + + UpdatedAt types.Int64 `tfsdk:"updated_at"` + + UpdatedBy types.String `tfsdk:"updated_by"` +} + +type ExchangeFilter struct { + CreatedAt types.Int64 `tfsdk:"created_at"` + + CreatedBy types.String `tfsdk:"created_by"` + + ExchangeId types.String `tfsdk:"exchange_id"` + + FilterType ExchangeFilterType `tfsdk:"filter_type"` + + FilterValue types.String `tfsdk:"filter_value"` + + Id types.String `tfsdk:"id"` + + Name types.String `tfsdk:"name"` + + UpdatedAt types.Int64 `tfsdk:"updated_at"` + + UpdatedBy types.String `tfsdk:"updated_by"` +} + +type ExchangeFilterType string + +const ExchangeFilterTypeGlobalMetastoreId ExchangeFilterType = `GLOBAL_METASTORE_ID` + +// String representation for [fmt.Print] +func (f *ExchangeFilterType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExchangeFilterType) Set(v string) error { + switch v { + case `GLOBAL_METASTORE_ID`: + *f = ExchangeFilterType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "GLOBAL_METASTORE_ID"`, v) + } +} + +// Type always returns ExchangeFilterType to satisfy [pflag.Value] interface +func (f *ExchangeFilterType) Type() string { + return "ExchangeFilterType" +} + +type ExchangeListing struct { + CreatedAt types.Int64 `tfsdk:"created_at"` + + CreatedBy types.String `tfsdk:"created_by"` + + ExchangeId 
types.String `tfsdk:"exchange_id"` + + ExchangeName types.String `tfsdk:"exchange_name"` + + Id types.String `tfsdk:"id"` + + ListingId types.String `tfsdk:"listing_id"` + + ListingName types.String `tfsdk:"listing_name"` +} + +type FileInfo struct { + CreatedAt types.Int64 `tfsdk:"created_at"` + // Name displayed to users for applicable files, e.g. embedded notebooks + DisplayName types.String `tfsdk:"display_name"` + + DownloadLink types.String `tfsdk:"download_link"` + + FileParent *FileParent `tfsdk:"file_parent"` + + Id types.String `tfsdk:"id"` + + MarketplaceFileType MarketplaceFileType `tfsdk:"marketplace_file_type"` + + MimeType types.String `tfsdk:"mime_type"` + + Status FileStatus `tfsdk:"status"` + // Populated if status is in a failed state with more information on reason + // for the failure. + StatusMessage types.String `tfsdk:"status_message"` + + UpdatedAt types.Int64 `tfsdk:"updated_at"` +} + +type FileParent struct { + FileParentType FileParentType `tfsdk:"file_parent_type"` + // TODO make the following fields required + ParentId types.String `tfsdk:"parent_id"` +} + +type FileParentType string + +const FileParentTypeListing FileParentType = `LISTING` + +const FileParentTypeProvider FileParentType = `PROVIDER` + +// String representation for [fmt.Print] +func (f *FileParentType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FileParentType) Set(v string) error { + switch v { + case `LISTING`, `PROVIDER`: + *f = FileParentType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "LISTING", "PROVIDER"`, v) + } +} + +// Type always returns FileParentType to satisfy [pflag.Value] interface +func (f *FileParentType) Type() string { + return "FileParentType" +} + +type FileStatus string + +const FileStatusFileStatusPublished FileStatus = `FILE_STATUS_PUBLISHED` + +const FileStatusFileStatusSanitizationFailed FileStatus = `FILE_STATUS_SANITIZATION_FAILED` + +const FileStatusFileStatusSanitizing FileStatus = `FILE_STATUS_SANITIZING` + +const FileStatusFileStatusStaging FileStatus = `FILE_STATUS_STAGING` + +// String representation for [fmt.Print] +func (f *FileStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FileStatus) Set(v string) error { + switch v { + case `FILE_STATUS_PUBLISHED`, `FILE_STATUS_SANITIZATION_FAILED`, `FILE_STATUS_SANITIZING`, `FILE_STATUS_STAGING`: + *f = FileStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FILE_STATUS_PUBLISHED", "FILE_STATUS_SANITIZATION_FAILED", "FILE_STATUS_SANITIZING", "FILE_STATUS_STAGING"`, v) + } +} + +// Type always returns FileStatus to satisfy [pflag.Value] interface +func (f *FileStatus) Type() string { + return "FileStatus" +} + +type FilterType string + +const FilterTypeMetastore FilterType = `METASTORE` + +// String representation for [fmt.Print] +func (f *FilterType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FilterType) Set(v string) error { + switch v { + case `METASTORE`: + *f = FilterType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "METASTORE"`, v) + } +} + +// Type always returns FilterType to satisfy [pflag.Value] interface +func (f *FilterType) Type() string { + return "FilterType" +} + +type FulfillmentType string + +const FulfillmentTypeInstall FulfillmentType = `INSTALL` + +const FulfillmentTypeRequestAccess 
FulfillmentType = `REQUEST_ACCESS` + +// String representation for [fmt.Print] +func (f *FulfillmentType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FulfillmentType) Set(v string) error { + switch v { + case `INSTALL`, `REQUEST_ACCESS`: + *f = FulfillmentType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "INSTALL", "REQUEST_ACCESS"`, v) + } +} + +// Type always returns FulfillmentType to satisfy [pflag.Value] interface +func (f *FulfillmentType) Type() string { + return "FulfillmentType" +} + +// Get an exchange +type GetExchangeRequest struct { + Id types.String `tfsdk:"-" url:"-"` +} + +type GetExchangeResponse struct { + Exchange *Exchange `tfsdk:"exchange"` +} + +// Get a file +type GetFileRequest struct { + FileId types.String `tfsdk:"-" url:"-"` +} + +type GetFileResponse struct { + FileInfo *FileInfo `tfsdk:"file_info"` +} + +type GetLatestVersionProviderAnalyticsDashboardResponse struct { + // version here is latest logical version of the dashboard template + Version types.Int64 `tfsdk:"version"` +} + +// Get listing content metadata +type GetListingContentMetadataRequest struct { + ListingId types.String `tfsdk:"-" url:"-"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type GetListingContentMetadataResponse struct { + NextPageToken types.String `tfsdk:"next_page_token"` + + SharedDataObjects []SharedDataObject `tfsdk:"shared_data_objects"` +} + +// Get listing +type GetListingRequest struct { + Id types.String `tfsdk:"-" url:"-"` +} + +type GetListingResponse struct { + Listing *Listing `tfsdk:"listing"` +} + +// List listings +type GetListingsRequest struct { + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type GetListingsResponse struct { + Listings []Listing `tfsdk:"listings"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// Get the personalization request for a listing +type GetPersonalizationRequestRequest struct { + ListingId types.String `tfsdk:"-" url:"-"` +} + +type GetPersonalizationRequestResponse struct { + PersonalizationRequests []PersonalizationRequest `tfsdk:"personalization_requests"` +} + +// Get a provider +type GetProviderRequest struct { + Id types.String `tfsdk:"-" url:"-"` +} + +type GetProviderResponse struct { + Provider *ProviderInfo `tfsdk:"provider"` +} + +type Installation struct { + Installation *InstallationDetail `tfsdk:"installation"` +} + +type InstallationDetail struct { + CatalogName types.String `tfsdk:"catalog_name"` + + ErrorMessage types.String `tfsdk:"error_message"` + + Id types.String `tfsdk:"id"` + + InstalledOn types.Int64 `tfsdk:"installed_on"` + + ListingId types.String `tfsdk:"listing_id"` + + ListingName types.String `tfsdk:"listing_name"` + + RecipientType DeltaSharingRecipientType `tfsdk:"recipient_type"` + + RepoName types.String `tfsdk:"repo_name"` + + RepoPath types.String `tfsdk:"repo_path"` + + ShareName types.String `tfsdk:"share_name"` + + Status InstallationStatus `tfsdk:"status"` + + TokenDetail *TokenDetail `tfsdk:"token_detail"` + + Tokens []TokenInfo `tfsdk:"tokens"` +} + +type InstallationStatus string + +const InstallationStatusFailed InstallationStatus = `FAILED` + +const InstallationStatusInstalled InstallationStatus = `INSTALLED` + +// String representation for [fmt.Print] +func (f *InstallationStatus) String() string { + 
return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *InstallationStatus) Set(v string) error { + switch v { + case `FAILED`, `INSTALLED`: + *f = InstallationStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED", "INSTALLED"`, v) + } +} + +// Type always returns InstallationStatus to satisfy [pflag.Value] interface +func (f *InstallationStatus) Type() string { + return "InstallationStatus" +} + +// List all installations +type ListAllInstallationsRequest struct { + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListAllInstallationsResponse struct { + Installations []InstallationDetail `tfsdk:"installations"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List all personalization requests +type ListAllPersonalizationRequestsRequest struct { + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListAllPersonalizationRequestsResponse struct { + NextPageToken types.String `tfsdk:"next_page_token"` + + PersonalizationRequests []PersonalizationRequest `tfsdk:"personalization_requests"` +} + +// List exchange filters +type ListExchangeFiltersRequest struct { + ExchangeId types.String `tfsdk:"-" url:"exchange_id"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListExchangeFiltersResponse struct { + Filters []ExchangeFilter `tfsdk:"filters"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List exchanges for listing +type ListExchangesForListingRequest struct { + ListingId types.String `tfsdk:"-" url:"listing_id"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListExchangesForListingResponse struct { + ExchangeListing []ExchangeListing `tfsdk:"exchange_listing"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List exchanges +type ListExchangesRequest struct { + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListExchangesResponse struct { + Exchanges []Exchange `tfsdk:"exchanges"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List files +type ListFilesRequest struct { + FileParent FileParent `tfsdk:"-" url:"file_parent"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListFilesResponse struct { + FileInfos []FileInfo `tfsdk:"file_infos"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List all listing fulfillments +type ListFulfillmentsRequest struct { + ListingId types.String `tfsdk:"-" url:"-"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListFulfillmentsResponse struct { + Fulfillments []ListingFulfillment `tfsdk:"fulfillments"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List installations for a listing +type ListInstallationsRequest struct { + ListingId types.String `tfsdk:"-" url:"-"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListInstallationsResponse struct { + Installations 
[]InstallationDetail `tfsdk:"installations"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List listings for exchange +type ListListingsForExchangeRequest struct { + ExchangeId types.String `tfsdk:"-" url:"exchange_id"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListListingsForExchangeResponse struct { + ExchangeListings []ExchangeListing `tfsdk:"exchange_listings"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List listings +type ListListingsRequest struct { + // Matches any of the following asset types + Assets []AssetType `tfsdk:"-" url:"assets,omitempty"` + // Matches any of the following categories + Categories []Category `tfsdk:"-" url:"categories,omitempty"` + + IsAscending types.Bool `tfsdk:"-" url:"is_ascending,omitempty"` + // Filters each listing based on if it is free. + IsFree types.Bool `tfsdk:"-" url:"is_free,omitempty"` + // Filters each listing based on if it is a private exchange. + IsPrivateExchange types.Bool `tfsdk:"-" url:"is_private_exchange,omitempty"` + // Filters each listing based on whether it is a staff pick. + IsStaffPick types.Bool `tfsdk:"-" url:"is_staff_pick,omitempty"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // Matches any of the following provider ids + ProviderIds []types.String `tfsdk:"-" url:"provider_ids,omitempty"` + // Criteria for sorting the resulting set of listings. + SortBy SortBy `tfsdk:"-" url:"sort_by,omitempty"` + // Matches any of the following tags + Tags []ListingTag `tfsdk:"-" url:"tags,omitempty"` +} + +type ListListingsResponse struct { + Listings []Listing `tfsdk:"listings"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +type ListProviderAnalyticsDashboardResponse struct { + // dashboard_id will be used to open Lakeview dashboard. + DashboardId types.String `tfsdk:"dashboard_id"` + + Id types.String `tfsdk:"id"` + + Version types.Int64 `tfsdk:"version"` +} + +// List providers +type ListProvidersRequest struct { + IsFeatured types.Bool `tfsdk:"-" url:"is_featured,omitempty"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListProvidersResponse struct { + NextPageToken types.String `tfsdk:"next_page_token"` + + Providers []ProviderInfo `tfsdk:"providers"` +} + +type Listing struct { + Detail *ListingDetail `tfsdk:"detail"` + + Id types.String `tfsdk:"id"` + // we can not use just ProviderListingSummary since we already have same + // name on entity side of the state + ProviderSummary *ProviderListingSummaryInfo `tfsdk:"provider_summary"` + // Next Number: 26 + Summary ListingSummary `tfsdk:"summary"` +} + +type ListingDetail struct { + // Type of assets included in the listing. eg. 
GIT_REPO, DATA_TABLE, MODEL, + // NOTEBOOK + Assets []AssetType `tfsdk:"assets"` + // The ending date timestamp for when the data spans + CollectionDateEnd types.Int64 `tfsdk:"collection_date_end"` + // The starting date timestamp for when the data spans + CollectionDateStart types.Int64 `tfsdk:"collection_date_start"` + // Smallest unit of time in the dataset + CollectionGranularity *DataRefreshInfo `tfsdk:"collection_granularity"` + // Whether the dataset is free or paid + Cost Cost `tfsdk:"cost"` + // Where/how the data is sourced + DataSource types.String `tfsdk:"data_source"` + + Description types.String `tfsdk:"description"` + + DocumentationLink types.String `tfsdk:"documentation_link"` + + EmbeddedNotebookFileInfos []FileInfo `tfsdk:"embedded_notebook_file_infos"` + + FileIds []types.String `tfsdk:"file_ids"` + // Which geo region the listing data is collected from + GeographicalCoverage types.String `tfsdk:"geographical_coverage"` + // ID 20, 21 removed don't use License of the data asset - Required for + // listings with model based assets + License types.String `tfsdk:"license"` + // What the pricing model is (e.g. paid, subscription, paid upfront); should + // only be present if cost is paid TODO: Not used yet, should deprecate if + // we will never use it + PricingModel types.String `tfsdk:"pricing_model"` + + PrivacyPolicyLink types.String `tfsdk:"privacy_policy_link"` + // size of the dataset in GB + Size types.Float64 `tfsdk:"size"` + + SupportLink types.String `tfsdk:"support_link"` + // Listing tags - Simple key value pair to annotate listings. When should I + // use tags vs dedicated fields? Using tags avoids the need to add new + // columns in the database for new annotations. However, this should be used + // sparingly since tags are stored as key value pair. Use tags only: 1. If + // the field is optional and won't need to have NOT NULL integrity check 2. + // The value is fairly fixed, static and low cardinality (eg. enums). 3. The + // value won't be used in filters or joins with other tables. + Tags []ListingTag `tfsdk:"tags"` + + TermsOfService types.String `tfsdk:"terms_of_service"` + // How often data is updated + UpdateFrequency *DataRefreshInfo `tfsdk:"update_frequency"` +} + +type ListingFulfillment struct { + FulfillmentType FulfillmentType `tfsdk:"fulfillment_type"` + + ListingId types.String `tfsdk:"listing_id"` + + RecipientType DeltaSharingRecipientType `tfsdk:"recipient_type"` + + RepoInfo *RepoInfo `tfsdk:"repo_info"` + + ShareInfo *ShareInfo `tfsdk:"share_info"` +} + +type ListingSetting struct { + // filters are joined with `or` conjunction. 
+ Filters []VisibilityFilter `tfsdk:"filters"` + + Visibility Visibility `tfsdk:"visibility"` +} + +type ListingShareType string + +const ListingShareTypeFull ListingShareType = `FULL` + +const ListingShareTypeSample ListingShareType = `SAMPLE` + +// String representation for [fmt.Print] +func (f *ListingShareType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListingShareType) Set(v string) error { + switch v { + case `FULL`, `SAMPLE`: + *f = ListingShareType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FULL", "SAMPLE"`, v) + } +} + +// Type always returns ListingShareType to satisfy [pflag.Value] interface +func (f *ListingShareType) Type() string { + return "ListingShareType" +} + +// Enums +type ListingStatus string + +const ListingStatusDraft ListingStatus = `DRAFT` + +const ListingStatusPending ListingStatus = `PENDING` + +const ListingStatusPublished ListingStatus = `PUBLISHED` + +const ListingStatusSuspended ListingStatus = `SUSPENDED` + +// String representation for [fmt.Print] +func (f *ListingStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListingStatus) Set(v string) error { + switch v { + case `DRAFT`, `PENDING`, `PUBLISHED`, `SUSPENDED`: + *f = ListingStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DRAFT", "PENDING", "PUBLISHED", "SUSPENDED"`, v) + } +} + +// Type always returns ListingStatus to satisfy [pflag.Value] interface +func (f *ListingStatus) Type() string { + return "ListingStatus" +} + +// Next Number: 26 +type ListingSummary struct { + Categories []Category `tfsdk:"categories"` + + CreatedAt types.Int64 `tfsdk:"created_at"` + + CreatedBy types.String `tfsdk:"created_by"` + + CreatedById types.Int64 `tfsdk:"created_by_id"` + + ExchangeIds []types.String `tfsdk:"exchange_ids"` + // if a git repo is being created, a listing will be initialized with this + // field as opposed to a share + GitRepo *RepoInfo `tfsdk:"git_repo"` + + ListingType ListingType `tfsdk:"listingType"` + + MetastoreId types.String `tfsdk:"metastore_id"` + + Name types.String `tfsdk:"name"` + + ProviderId types.String `tfsdk:"provider_id"` + + ProviderRegion *RegionInfo `tfsdk:"provider_region"` + + PublishedAt types.Int64 `tfsdk:"published_at"` + + PublishedBy types.String `tfsdk:"published_by"` + + Setting *ListingSetting `tfsdk:"setting"` + + Share *ShareInfo `tfsdk:"share"` + // Enums + Status ListingStatus `tfsdk:"status"` + + Subtitle types.String `tfsdk:"subtitle"` + + UpdatedAt types.Int64 `tfsdk:"updated_at"` + + UpdatedBy types.String `tfsdk:"updated_by"` + + UpdatedById types.Int64 `tfsdk:"updated_by_id"` +} + +type ListingTag struct { + // Tag name (enum) + TagName ListingTagType `tfsdk:"tag_name"` + // String representation of the tag value. 
Values should be string literals + // (no complex types) + TagValues []types.String `tfsdk:"tag_values"` +} + +type ListingTagType string + +const ListingTagTypeListingTagTypeLanguage ListingTagType = `LISTING_TAG_TYPE_LANGUAGE` + +const ListingTagTypeListingTagTypeTask ListingTagType = `LISTING_TAG_TYPE_TASK` + +const ListingTagTypeListingTagTypeUnspecified ListingTagType = `LISTING_TAG_TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *ListingTagType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListingTagType) Set(v string) error { + switch v { + case `LISTING_TAG_TYPE_LANGUAGE`, `LISTING_TAG_TYPE_TASK`, `LISTING_TAG_TYPE_UNSPECIFIED`: + *f = ListingTagType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "LISTING_TAG_TYPE_LANGUAGE", "LISTING_TAG_TYPE_TASK", "LISTING_TAG_TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns ListingTagType to satisfy [pflag.Value] interface +func (f *ListingTagType) Type() string { + return "ListingTagType" +} + +type ListingType string + +const ListingTypePersonalized ListingType = `PERSONALIZED` + +const ListingTypeStandard ListingType = `STANDARD` + +// String representation for [fmt.Print] +func (f *ListingType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListingType) Set(v string) error { + switch v { + case `PERSONALIZED`, `STANDARD`: + *f = ListingType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PERSONALIZED", "STANDARD"`, v) + } +} + +// Type always returns ListingType to satisfy [pflag.Value] interface +func (f *ListingType) Type() string { + return "ListingType" +} + +type MarketplaceFileType string + +const MarketplaceFileTypeEmbeddedNotebook MarketplaceFileType = `EMBEDDED_NOTEBOOK` + +const MarketplaceFileTypeProviderIcon MarketplaceFileType = `PROVIDER_ICON` + +// String representation for [fmt.Print] +func (f *MarketplaceFileType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MarketplaceFileType) Set(v string) error { + switch v { + case `EMBEDDED_NOTEBOOK`, `PROVIDER_ICON`: + *f = MarketplaceFileType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EMBEDDED_NOTEBOOK", "PROVIDER_ICON"`, v) + } +} + +// Type always returns MarketplaceFileType to satisfy [pflag.Value] interface +func (f *MarketplaceFileType) Type() string { + return "MarketplaceFileType" +} + +type PersonalizationRequest struct { + Comment types.String `tfsdk:"comment"` + + ConsumerRegion RegionInfo `tfsdk:"consumer_region"` + // contact info for the consumer requesting data or performing a listing + // installation + ContactInfo *ContactInfo `tfsdk:"contact_info"` + + CreatedAt types.Int64 `tfsdk:"created_at"` + + Id types.String `tfsdk:"id"` + + IntendedUse types.String `tfsdk:"intended_use"` + + IsFromLighthouse types.Bool `tfsdk:"is_from_lighthouse"` + + ListingId types.String `tfsdk:"listing_id"` + + ListingName types.String `tfsdk:"listing_name"` + + MetastoreId types.String `tfsdk:"metastore_id"` + + ProviderId types.String `tfsdk:"provider_id"` + + RecipientType DeltaSharingRecipientType `tfsdk:"recipient_type"` + + Share *ShareInfo `tfsdk:"share"` + + Status PersonalizationRequestStatus `tfsdk:"status"` + + StatusMessage types.String `tfsdk:"status_message"` + + UpdatedAt types.Int64 `tfsdk:"updated_at"` +} + +type PersonalizationRequestStatus string + 
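Each generated enum ships the same trio of methods (String, Set, Type), which is exactly the pflag.Value contract, so a value can be validated from raw input or bound directly to a command-line flag. A minimal sketch, assuming the import path shown matches this repository's go.mod:

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	// Assumed module path for this repository; adjust to the real go.mod path.
	"github.com/databricks/terraform-provider-databricks/service/marketplace_tf"
)

func main() {
	var status marketplace_tf.ListingStatus

	// Set rejects anything outside the generated allow-list.
	if err := status.Set("PUBLISHED"); err != nil {
		panic(err)
	}
	fmt.Println(status.String()) // PUBLISHED

	// Because the methods satisfy pflag.Value, the enum can back a flag too.
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Var(&status, "status", "listing status (DRAFT, PENDING, PUBLISHED, SUSPENDED)")
	_ = fs.Parse([]string{"--status", "DRAFT"})
	fmt.Println(status) // DRAFT
}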
+const PersonalizationRequestStatusDenied PersonalizationRequestStatus = `DENIED` + +const PersonalizationRequestStatusFulfilled PersonalizationRequestStatus = `FULFILLED` + +const PersonalizationRequestStatusNew PersonalizationRequestStatus = `NEW` + +const PersonalizationRequestStatusRequestPending PersonalizationRequestStatus = `REQUEST_PENDING` + +// String representation for [fmt.Print] +func (f *PersonalizationRequestStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PersonalizationRequestStatus) Set(v string) error { + switch v { + case `DENIED`, `FULFILLED`, `NEW`, `REQUEST_PENDING`: + *f = PersonalizationRequestStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DENIED", "FULFILLED", "NEW", "REQUEST_PENDING"`, v) + } +} + +// Type always returns PersonalizationRequestStatus to satisfy [pflag.Value] interface +func (f *PersonalizationRequestStatus) Type() string { + return "PersonalizationRequestStatus" +} + +type ProviderAnalyticsDashboard struct { + Id types.String `tfsdk:"id"` +} + +type ProviderIconFile struct { + IconFileId types.String `tfsdk:"icon_file_id"` + + IconFilePath types.String `tfsdk:"icon_file_path"` + + IconType ProviderIconType `tfsdk:"icon_type"` +} + +type ProviderIconType string + +const ProviderIconTypeDark ProviderIconType = `DARK` + +const ProviderIconTypePrimary ProviderIconType = `PRIMARY` + +const ProviderIconTypeProviderIconTypeUnspecified ProviderIconType = `PROVIDER_ICON_TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *ProviderIconType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ProviderIconType) Set(v string) error { + switch v { + case `DARK`, `PRIMARY`, `PROVIDER_ICON_TYPE_UNSPECIFIED`: + *f = ProviderIconType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DARK", "PRIMARY", "PROVIDER_ICON_TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns ProviderIconType to satisfy [pflag.Value] interface +func (f *ProviderIconType) Type() string { + return "ProviderIconType" +} + +type ProviderInfo struct { + BusinessContactEmail types.String `tfsdk:"business_contact_email"` + + CompanyWebsiteLink types.String `tfsdk:"company_website_link"` + + DarkModeIconFileId types.String `tfsdk:"dark_mode_icon_file_id"` + + DarkModeIconFilePath types.String `tfsdk:"dark_mode_icon_file_path"` + + Description types.String `tfsdk:"description"` + + IconFileId types.String `tfsdk:"icon_file_id"` + + IconFilePath types.String `tfsdk:"icon_file_path"` + + Id types.String `tfsdk:"id"` + // is_featured is accessible by consumers only + IsFeatured types.Bool `tfsdk:"is_featured"` + + Name types.String `tfsdk:"name"` + + PrivacyPolicyLink types.String `tfsdk:"privacy_policy_link"` + // published_by is only applicable to data aggregators (e.g. 
Crux) + PublishedBy types.String `tfsdk:"published_by"` + + SupportContactEmail types.String `tfsdk:"support_contact_email"` + + TermOfServiceLink types.String `tfsdk:"term_of_service_link"` +} + +// we can not use just ProviderListingSummary since we already have same name on +// entity side of the state +type ProviderListingSummaryInfo struct { + Description types.String `tfsdk:"description"` + + IconFiles []ProviderIconFile `tfsdk:"icon_files"` + + Name types.String `tfsdk:"name"` +} + +type RegionInfo struct { + Cloud types.String `tfsdk:"cloud"` + + Region types.String `tfsdk:"region"` +} + +// Remove an exchange for listing +type RemoveExchangeForListingRequest struct { + Id types.String `tfsdk:"-" url:"-"` +} + +type RemoveExchangeForListingResponse struct { +} + +type RepoInfo struct { + // the git repo url e.g. https://github.com/databrickslabs/dolly.git + GitRepoUrl types.String `tfsdk:"git_repo_url"` +} + +type RepoInstallation struct { + // the user-specified repo name for their installed git repo listing + RepoName types.String `tfsdk:"repo_name"` + // refers to the full url file path that navigates the user to the repo's + // entrypoint (e.g. a README.md file, or the repo file view in the unified + // UI) should just be a relative path + RepoPath types.String `tfsdk:"repo_path"` +} + +// Search listings +type SearchListingsRequest struct { + // Matches any of the following asset types + Assets []AssetType `tfsdk:"-" url:"assets,omitempty"` + // Matches any of the following categories + Categories []Category `tfsdk:"-" url:"categories,omitempty"` + + IsAscending types.Bool `tfsdk:"-" url:"is_ascending,omitempty"` + + IsFree types.Bool `tfsdk:"-" url:"is_free,omitempty"` + + IsPrivateExchange types.Bool `tfsdk:"-" url:"is_private_exchange,omitempty"` + + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // Matches any of the following provider ids + ProviderIds []types.String `tfsdk:"-" url:"provider_ids,omitempty"` + // Fuzzy matches query + Query types.String `tfsdk:"-" url:"query"` + + SortBy SortBy `tfsdk:"-" url:"sort_by,omitempty"` +} + +type SearchListingsResponse struct { + Listings []Listing `tfsdk:"listings"` + + NextPageToken types.String `tfsdk:"next_page_token"` +} + +type ShareInfo struct { + Name types.String `tfsdk:"name"` + + Type ListingShareType `tfsdk:"type"` +} + +type SharedDataObject struct { + // The type of the data object. 
Could be one of: TABLE, SCHEMA, + // NOTEBOOK_FILE, MODEL, VOLUME + DataObjectType types.String `tfsdk:"data_object_type"` + // Name of the shared object + Name types.String `tfsdk:"name"` +} + +type SortBy string + +const SortBySortByDate SortBy = `SORT_BY_DATE` + +const SortBySortByRelevance SortBy = `SORT_BY_RELEVANCE` + +const SortBySortByTitle SortBy = `SORT_BY_TITLE` + +const SortBySortByUnspecified SortBy = `SORT_BY_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *SortBy) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SortBy) Set(v string) error { + switch v { + case `SORT_BY_DATE`, `SORT_BY_RELEVANCE`, `SORT_BY_TITLE`, `SORT_BY_UNSPECIFIED`: + *f = SortBy(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "SORT_BY_DATE", "SORT_BY_RELEVANCE", "SORT_BY_TITLE", "SORT_BY_UNSPECIFIED"`, v) + } +} + +// Type always returns SortBy to satisfy [pflag.Value] interface +func (f *SortBy) Type() string { + return "SortBy" +} + +type TokenDetail struct { + BearerToken types.String `tfsdk:"bearerToken"` + + Endpoint types.String `tfsdk:"endpoint"` + + ExpirationTime types.String `tfsdk:"expirationTime"` + // These field names must follow the delta sharing protocol. Original + // message: RetrieveToken.Response in + // managed-catalog/api/messages/recipient.proto + ShareCredentialsVersion types.Int64 `tfsdk:"shareCredentialsVersion"` +} + +type TokenInfo struct { + // Full activation url to retrieve the access token. It will be empty if the + // token is already retrieved. + ActivationUrl types.String `tfsdk:"activation_url"` + // Time at which this Recipient Token was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of Recipient Token creator. + CreatedBy types.String `tfsdk:"created_by"` + // Expiration timestamp of the token in epoch milliseconds. + ExpirationTime types.Int64 `tfsdk:"expiration_time"` + // Unique id of the Recipient Token. + Id types.String `tfsdk:"id"` + // Time at which this Recipient Token was updated, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of Recipient Token updater. 
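Request types such as SearchListingsRequest mark path and query parameters with tfsdk:"-" plus a url tag, so they never land in terraform state and are only serialized onto the request URL, while state-bearing fields keep a plain tfsdk name. A minimal sketch of populating one with the framework's value constructors, assuming it sits next to the generated types:

package marketplace_tf // illustrative sketch only

import "github.com/hashicorp/terraform-plugin-framework/types"

// exampleSearchRequest fills only url-tagged, request-scoped fields; none of
// these values would be persisted into terraform state.
func exampleSearchRequest() SearchListingsRequest {
	return SearchListingsRequest{
		Query:    types.StringValue("forecasting"),
		IsFree:   types.BoolValue(true),
		PageSize: types.Int64Value(50),
	}
}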
+ UpdatedBy types.String `tfsdk:"updated_by"` +} + +type UpdateExchangeFilterRequest struct { + Filter ExchangeFilter `tfsdk:"filter"` + + Id types.String `tfsdk:"-" url:"-"` +} + +type UpdateExchangeFilterResponse struct { + Filter *ExchangeFilter `tfsdk:"filter"` +} + +type UpdateExchangeRequest struct { + Exchange Exchange `tfsdk:"exchange"` + + Id types.String `tfsdk:"-" url:"-"` +} + +type UpdateExchangeResponse struct { + Exchange *Exchange `tfsdk:"exchange"` +} + +type UpdateInstallationRequest struct { + Installation InstallationDetail `tfsdk:"installation"` + + InstallationId types.String `tfsdk:"-" url:"-"` + + ListingId types.String `tfsdk:"-" url:"-"` + + RotateToken types.Bool `tfsdk:"rotate_token"` +} + +type UpdateInstallationResponse struct { + Installation *InstallationDetail `tfsdk:"installation"` +} + +type UpdateListingRequest struct { + Id types.String `tfsdk:"-" url:"-"` + + Listing Listing `tfsdk:"listing"` +} + +type UpdateListingResponse struct { + Listing *Listing `tfsdk:"listing"` +} + +type UpdatePersonalizationRequestRequest struct { + ListingId types.String `tfsdk:"-" url:"-"` + + Reason types.String `tfsdk:"reason"` + + RequestId types.String `tfsdk:"-" url:"-"` + + Share *ShareInfo `tfsdk:"share"` + + Status PersonalizationRequestStatus `tfsdk:"status"` +} + +type UpdatePersonalizationRequestResponse struct { + Request *PersonalizationRequest `tfsdk:"request"` +} + +type UpdateProviderAnalyticsDashboardRequest struct { + // id is immutable property and can't be updated. + Id types.String `tfsdk:"-" url:"-"` + // this is the version of the dashboard template we want to update our user + // to current expectation is that it should be equal to latest version of + // the dashboard template + Version types.Int64 `tfsdk:"version"` +} + +type UpdateProviderAnalyticsDashboardResponse struct { + // this is newly created Lakeview dashboard for the user + DashboardId types.String `tfsdk:"dashboard_id"` + // id & version should be the same as the request + Id types.String `tfsdk:"id"` + + Version types.Int64 `tfsdk:"version"` +} + +type UpdateProviderRequest struct { + Id types.String `tfsdk:"-" url:"-"` + + Provider ProviderInfo `tfsdk:"provider"` +} + +type UpdateProviderResponse struct { + Provider *ProviderInfo `tfsdk:"provider"` +} + +type Visibility string + +const VisibilityPrivate Visibility = `PRIVATE` + +const VisibilityPublic Visibility = `PUBLIC` + +// String representation for [fmt.Print] +func (f *Visibility) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Visibility) Set(v string) error { + switch v { + case `PRIVATE`, `PUBLIC`: + *f = Visibility(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PRIVATE", "PUBLIC"`, v) + } +} + +// Type always returns Visibility to satisfy [pflag.Value] interface +func (f *Visibility) Type() string { + return "Visibility" +} + +type VisibilityFilter struct { + FilterType FilterType `tfsdk:"filterType"` + + FilterValue types.String `tfsdk:"filterValue"` +} diff --git a/service/ml_tf/model.go b/service/ml_tf/model.go new file mode 100755 index 0000000000..f0a9acb9c5 --- /dev/null +++ b/service/ml_tf/model.go @@ -0,0 +1,2257 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. 
+The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package ml_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Activity recorded for the action. +type Activity struct { + // Type of activity. Valid values are: * `APPLIED_TRANSITION`: User applied + // the corresponding stage transition. + // + // * `REQUESTED_TRANSITION`: User requested the corresponding stage + // transition. + // + // * `CANCELLED_REQUEST`: User cancelled an existing transition request. + // + // * `APPROVED_REQUEST`: User approved the corresponding stage transition. + // + // * `REJECTED_REQUEST`: User rejected the coressponding stage transition. + // + // * `SYSTEM_TRANSITION`: For events performed as a side effect, such as + // archiving existing model versions in a stage. + ActivityType ActivityType `tfsdk:"activity_type"` + // User-provided comment associated with the activity. + Comment types.String `tfsdk:"comment"` + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // Source stage of the transition (if the activity is stage transition + // related). Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + FromStage Stage `tfsdk:"from_stage"` + // Unique identifier for the object. + Id types.String `tfsdk:"id"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // Comment made by system, for example explaining an activity of type + // `SYSTEM_TRANSITION`. It usually describes a side effect, such as a + // version being archived as part of another version's stage transition, and + // may not be returned for some activity types. + SystemComment types.String `tfsdk:"system_comment"` + // Target stage of the transition (if the activity is stage transition + // related). Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + ToStage Stage `tfsdk:"to_stage"` + // The username of the user that created the object. + UserId types.String `tfsdk:"user_id"` +} + +// An action that a user (with sufficient permissions) could take on an +// activity. 
Valid values are: * `APPROVE_TRANSITION_REQUEST`: Approve a +// transition request +// +// * `REJECT_TRANSITION_REQUEST`: Reject a transition request +// +// * `CANCEL_TRANSITION_REQUEST`: Cancel (delete) a transition request +type ActivityAction string + +// Approve a transition request +const ActivityActionApproveTransitionRequest ActivityAction = `APPROVE_TRANSITION_REQUEST` + +// Cancel (delete) a transition request +const ActivityActionCancelTransitionRequest ActivityAction = `CANCEL_TRANSITION_REQUEST` + +// Reject a transition request +const ActivityActionRejectTransitionRequest ActivityAction = `REJECT_TRANSITION_REQUEST` + +// String representation for [fmt.Print] +func (f *ActivityAction) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ActivityAction) Set(v string) error { + switch v { + case `APPROVE_TRANSITION_REQUEST`, `CANCEL_TRANSITION_REQUEST`, `REJECT_TRANSITION_REQUEST`: + *f = ActivityAction(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "APPROVE_TRANSITION_REQUEST", "CANCEL_TRANSITION_REQUEST", "REJECT_TRANSITION_REQUEST"`, v) + } +} + +// Type always returns ActivityAction to satisfy [pflag.Value] interface +func (f *ActivityAction) Type() string { + return "ActivityAction" +} + +// Type of activity. Valid values are: * `APPLIED_TRANSITION`: User applied the +// corresponding stage transition. +// +// * `REQUESTED_TRANSITION`: User requested the corresponding stage transition. +// +// * `CANCELLED_REQUEST`: User cancelled an existing transition request. +// +// * `APPROVED_REQUEST`: User approved the corresponding stage transition. +// +// * `REJECTED_REQUEST`: User rejected the coressponding stage transition. +// +// * `SYSTEM_TRANSITION`: For events performed as a side effect, such as +// archiving existing model versions in a stage. +type ActivityType string + +// User applied the corresponding stage transition. +const ActivityTypeAppliedTransition ActivityType = `APPLIED_TRANSITION` + +// User approved the corresponding stage transition. +const ActivityTypeApprovedRequest ActivityType = `APPROVED_REQUEST` + +// User cancelled an existing transition request. +const ActivityTypeCancelledRequest ActivityType = `CANCELLED_REQUEST` + +const ActivityTypeNewComment ActivityType = `NEW_COMMENT` + +// User rejected the coressponding stage transition. +const ActivityTypeRejectedRequest ActivityType = `REJECTED_REQUEST` + +// User requested the corresponding stage transition. +const ActivityTypeRequestedTransition ActivityType = `REQUESTED_TRANSITION` + +// For events performed as a side effect, such as archiving existing model +// versions in a stage. 
+const ActivityTypeSystemTransition ActivityType = `SYSTEM_TRANSITION` + +// String representation for [fmt.Print] +func (f *ActivityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ActivityType) Set(v string) error { + switch v { + case `APPLIED_TRANSITION`, `APPROVED_REQUEST`, `CANCELLED_REQUEST`, `NEW_COMMENT`, `REJECTED_REQUEST`, `REQUESTED_TRANSITION`, `SYSTEM_TRANSITION`: + *f = ActivityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "APPLIED_TRANSITION", "APPROVED_REQUEST", "CANCELLED_REQUEST", "NEW_COMMENT", "REJECTED_REQUEST", "REQUESTED_TRANSITION", "SYSTEM_TRANSITION"`, v) + } +} + +// Type always returns ActivityType to satisfy [pflag.Value] interface +func (f *ActivityType) Type() string { + return "ActivityType" +} + +type ApproveTransitionRequest struct { + // Specifies whether to archive all current model versions in the target + // stage. + ArchiveExistingVersions types.Bool `tfsdk:"archive_existing_versions"` + // User-provided comment on the action. + Comment types.String `tfsdk:"comment"` + // Name of the model. + Name types.String `tfsdk:"name"` + // Target stage of the transition. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage Stage `tfsdk:"stage"` + // Version of the model. + Version types.String `tfsdk:"version"` +} + +type ApproveTransitionRequestResponse struct { + // Activity recorded for the action. + Activity *Activity `tfsdk:"activity"` +} + +// An action that a user (with sufficient permissions) could take on a comment. +// Valid values are: * `EDIT_COMMENT`: Edit the comment +// +// * `DELETE_COMMENT`: Delete the comment +type CommentActivityAction string + +// Delete the comment +const CommentActivityActionDeleteComment CommentActivityAction = `DELETE_COMMENT` + +// Edit the comment +const CommentActivityActionEditComment CommentActivityAction = `EDIT_COMMENT` + +// String representation for [fmt.Print] +func (f *CommentActivityAction) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CommentActivityAction) Set(v string) error { + switch v { + case `DELETE_COMMENT`, `EDIT_COMMENT`: + *f = CommentActivityAction(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETE_COMMENT", "EDIT_COMMENT"`, v) + } +} + +// Type always returns CommentActivityAction to satisfy [pflag.Value] interface +func (f *CommentActivityAction) Type() string { + return "CommentActivityAction" +} + +// Comment details. +type CommentObject struct { + // Array of actions on the activity allowed for the current viewer. + AvailableActions []CommentActivityAction `tfsdk:"available_actions"` + // User-provided comment on the action. + Comment types.String `tfsdk:"comment"` + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // Comment ID + Id types.String `tfsdk:"id"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // The username of the user that created the object. + UserId types.String `tfsdk:"user_id"` +} + +type CreateComment struct { + // User-provided comment on the action. + Comment types.String `tfsdk:"comment"` + // Name of the model. 
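ApproveTransitionRequest ties together the model identity, the target Stage, and the archive_existing_versions switch described above. A hedged sketch of assembling one; StageProduction is assumed to be the generated constant for the `Production` stage value and should be checked against the constants later in this file:

package ml_tf // illustrative sketch only

import "github.com/hashicorp/terraform-plugin-framework/types"

// exampleApproval promotes version 3 of a model to Production and archives
// whatever currently occupies that stage.
func exampleApproval() ApproveTransitionRequest {
	return ApproveTransitionRequest{
		Name:                    types.StringValue("fraud-model"),
		Version:                 types.StringValue("3"),
		Stage:                   StageProduction, // assumed constant name for `Production`
		ArchiveExistingVersions: types.BoolValue(true),
		Comment:                 types.StringValue("approved after offline evaluation"),
	}
}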
+ Name types.String `tfsdk:"name"` + // Version of the model. + Version types.String `tfsdk:"version"` +} + +type CreateCommentResponse struct { + // Comment details. + Comment *CommentObject `tfsdk:"comment"` +} + +type CreateExperiment struct { + // Location where all artifacts for the experiment are stored. If not + // provided, the remote server will select an appropriate default. + ArtifactLocation types.String `tfsdk:"artifact_location"` + // Experiment name. + Name types.String `tfsdk:"name"` + // A collection of tags to set on the experiment. Maximum tag size and + // number of tags per request depends on the storage backend. All storage + // backends are guaranteed to support tag keys up to 250 bytes in size and + // tag values up to 5000 bytes in size. All storage backends are also + // guaranteed to support up to 20 tags per request. + Tags []ExperimentTag `tfsdk:"tags"` +} + +type CreateExperimentResponse struct { + // Unique identifier for the experiment. + ExperimentId types.String `tfsdk:"experiment_id"` +} + +type CreateModelRequest struct { + // Optional description for registered model. + Description types.String `tfsdk:"description"` + // Register models under this name + Name types.String `tfsdk:"name"` + // Additional metadata for registered model. + Tags []ModelTag `tfsdk:"tags"` +} + +type CreateModelResponse struct { + RegisteredModel *Model `tfsdk:"registered_model"` +} + +type CreateModelVersionRequest struct { + // Optional description for model version. + Description types.String `tfsdk:"description"` + // Register model under this name + Name types.String `tfsdk:"name"` + // MLflow run ID for correlation, if `source` was generated by an experiment + // run in MLflow tracking server + RunId types.String `tfsdk:"run_id"` + // MLflow run link - this is the exact link of the run that generated this + // model version, potentially hosted at another instance of MLflow. + RunLink types.String `tfsdk:"run_link"` + // URI indicating the location of the model artifacts. + Source types.String `tfsdk:"source"` + // Additional metadata for model version. + Tags []ModelVersionTag `tfsdk:"tags"` +} + +type CreateModelVersionResponse struct { + // Return new version number generated for this model in registry. + ModelVersion *ModelVersion `tfsdk:"model_version"` +} + +type CreateRegistryWebhook struct { + // User-specified description for the webhook. + Description types.String `tfsdk:"description"` + // Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A + // new model version was created for the associated model. + // + // * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was + // changed. + // + // * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s + // stage be transitioned. + // + // * `COMMENT_CREATED`: A user wrote a comment on a registered model. + // + // * `REGISTERED_MODEL_CREATED`: A new registered model was created. This + // event type can only be specified for a registry-wide webhook, which can + // be created by not specifying a model name in the create request. + // + // * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version. + // + // * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was + // transitioned to staging. + // + // * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was + // transitioned to production. + // + // * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived. 
+ // + // * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model + // version be transitioned to staging. + // + // * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model + // version be transitioned to production. + // + // * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model + // version be archived. + Events []RegistryWebhookEvent `tfsdk:"events"` + + HttpUrlSpec *HttpUrlSpec `tfsdk:"http_url_spec"` + + JobSpec *JobSpec `tfsdk:"job_spec"` + // Name of the model whose events would trigger this webhook. + ModelName types.String `tfsdk:"model_name"` + // Enable or disable triggering the webhook, or put the webhook into test + // mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an + // associated event happens. + // + // * `DISABLED`: Webhook is not triggered. + // + // * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is + // not triggered on a real event. + Status RegistryWebhookStatus `tfsdk:"status"` +} + +type CreateRun struct { + // ID of the associated experiment. + ExperimentId types.String `tfsdk:"experiment_id"` + // Unix timestamp in milliseconds of when the run started. + StartTime types.Int64 `tfsdk:"start_time"` + // Additional metadata for run. + Tags []RunTag `tfsdk:"tags"` + // ID of the user executing the run. This field is deprecated as of MLflow + // 1.0, and will be removed in a future MLflow release. Use 'mlflow.user' + // tag instead. + UserId types.String `tfsdk:"user_id"` +} + +type CreateRunResponse struct { + // The newly created run. + Run *Run `tfsdk:"run"` +} + +type CreateTransitionRequest struct { + // User-provided comment on the action. + Comment types.String `tfsdk:"comment"` + // Name of the model. + Name types.String `tfsdk:"name"` + // Target stage of the transition. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage Stage `tfsdk:"stage"` + // Version of the model. + Version types.String `tfsdk:"version"` +} + +type CreateTransitionRequestResponse struct { + // Transition request details. + Request *TransitionRequest `tfsdk:"request"` +} + +type CreateWebhookResponse struct { + Webhook *RegistryWebhook `tfsdk:"webhook"` +} + +type Dataset struct { + // Dataset digest, e.g. an md5 hash of the dataset that uniquely identifies + // it within datasets of the same name. + Digest types.String `tfsdk:"digest"` + // The name of the dataset. E.g. “my.uc.table@2” “nyc-taxi-dataset”, + // “fantastic-elk-3” + Name types.String `tfsdk:"name"` + // The profile of the dataset. Summary statistics for the dataset, such as + // the number of rows in a table, the mean / std / mode of each column in a + // table, or the number of elements in an array. + Profile types.String `tfsdk:"profile"` + // The schema of the dataset. E.g., MLflow ColSpec JSON for a dataframe, + // MLflow TensorSpec JSON for an ndarray, or another schema format. + Schema types.String `tfsdk:"schema"` + // The type of the dataset source, e.g. ‘databricks-uc-table’, + // ‘DBFS’, ‘S3’, ... + Source types.String `tfsdk:"source"` + // Source information for the dataset. Note that the source may not exactly + // reproduce the dataset if it was transformed / modified before use with + // MLflow. + SourceType types.String `tfsdk:"source_type"` +} + +type DatasetInput struct { + // The dataset being used as a Run input. 
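CreateRegistryWebhook combines the event list documented above with either an HttpUrlSpec or a JobSpec destination. A hedged sketch of an HTTPS-backed webhook; the RegistryWebhookEvent and RegistryWebhookStatus constant names are assumed from the documented string values and should be checked against the generated constants elsewhere in this file:

package ml_tf // illustrative sketch only

import "github.com/hashicorp/terraform-plugin-framework/types"

// exampleWebhook fires on new model versions and posts them to an HTTPS
// endpoint; the shared secret enables HMAC signature checks downstream.
func exampleWebhook() CreateRegistryWebhook {
	return CreateRegistryWebhook{
		ModelName: types.StringValue("fraud-model"),
		Events: []RegistryWebhookEvent{
			RegistryWebhookEventModelVersionCreated, // assumed constant for MODEL_VERSION_CREATED
		},
		HttpUrlSpec: &HttpUrlSpec{
			Url:                   types.StringValue("https://example.com/hooks/mlflow"),
			EnableSslVerification: types.BoolValue(true),
			Secret:                types.StringValue("shared-hmac-secret"),
		},
		Status: RegistryWebhookStatusActive, // assumed constant for ACTIVE
	}
}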
+ Dataset *Dataset `tfsdk:"dataset"` + // A list of tags for the dataset input, e.g. a “context” tag with value + // “training” + Tags []InputTag `tfsdk:"tags"` +} + +// Delete a comment +type DeleteCommentRequest struct { + Id types.String `tfsdk:"-" url:"id"` +} + +type DeleteCommentResponse struct { +} + +type DeleteExperiment struct { + // ID of the associated experiment. + ExperimentId types.String `tfsdk:"experiment_id"` +} + +type DeleteExperimentResponse struct { +} + +// Delete a model +type DeleteModelRequest struct { + // Registered model unique name identifier. + Name types.String `tfsdk:"-" url:"name"` +} + +type DeleteModelResponse struct { +} + +// Delete a model tag +type DeleteModelTagRequest struct { + // Name of the tag. The name must be an exact match; wild-card deletion is + // not supported. Maximum size is 250 bytes. + Key types.String `tfsdk:"-" url:"key"` + // Name of the registered model that the tag was logged under. + Name types.String `tfsdk:"-" url:"name"` +} + +type DeleteModelTagResponse struct { +} + +// Delete a model version. +type DeleteModelVersionRequest struct { + // Name of the registered model + Name types.String `tfsdk:"-" url:"name"` + // Model version number + Version types.String `tfsdk:"-" url:"version"` +} + +type DeleteModelVersionResponse struct { +} + +// Delete a model version tag +type DeleteModelVersionTagRequest struct { + // Name of the tag. The name must be an exact match; wild-card deletion is + // not supported. Maximum size is 250 bytes. + Key types.String `tfsdk:"-" url:"key"` + // Name of the registered model that the tag was logged under. + Name types.String `tfsdk:"-" url:"name"` + // Model version number that the tag was logged under. + Version types.String `tfsdk:"-" url:"version"` +} + +type DeleteModelVersionTagResponse struct { +} + +type DeleteRun struct { + // ID of the run to delete. + RunId types.String `tfsdk:"run_id"` +} + +type DeleteRunResponse struct { +} + +type DeleteRuns struct { + // The ID of the experiment containing the runs to delete. + ExperimentId types.String `tfsdk:"experiment_id"` + // An optional positive integer indicating the maximum number of runs to + // delete. The maximum allowed value for max_runs is 10000. + MaxRuns types.Int64 `tfsdk:"max_runs"` + // The maximum creation timestamp in milliseconds since the UNIX epoch for + // deleting runs. Only runs created prior to or at this timestamp are + // deleted. + MaxTimestampMillis types.Int64 `tfsdk:"max_timestamp_millis"` +} + +type DeleteRunsResponse struct { + // The number of runs deleted. + RunsDeleted types.Int64 `tfsdk:"runs_deleted"` +} + +type DeleteTag struct { + // Name of the tag. Maximum size is 255 bytes. Must be provided. + Key types.String `tfsdk:"key"` + // ID of the run that the tag was logged under. Must be provided. + RunId types.String `tfsdk:"run_id"` +} + +type DeleteTagResponse struct { +} + +// Delete a transition request +type DeleteTransitionRequestRequest struct { + // User-provided comment on the action. + Comment types.String `tfsdk:"-" url:"comment,omitempty"` + // Username of the user who created this request. Of the transition requests + // matching the specified details, only the one transition created by this + // user will be deleted. + Creator types.String `tfsdk:"-" url:"creator"` + // Name of the model. + Name types.String `tfsdk:"-" url:"name"` + // Target stage of the transition request. Valid values are: + // + // * `None`: The initial stage of a model version. 
+ // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage DeleteTransitionRequestStage `tfsdk:"-" url:"stage"` + // Version of the model. + Version types.String `tfsdk:"-" url:"version"` +} + +type DeleteTransitionRequestResponse struct { +} + +type DeleteTransitionRequestStage string + +const DeleteTransitionRequestStageArchived DeleteTransitionRequestStage = `Archived` + +const DeleteTransitionRequestStageNone DeleteTransitionRequestStage = `None` + +const DeleteTransitionRequestStageProduction DeleteTransitionRequestStage = `Production` + +const DeleteTransitionRequestStageStaging DeleteTransitionRequestStage = `Staging` + +// String representation for [fmt.Print] +func (f *DeleteTransitionRequestStage) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeleteTransitionRequestStage) Set(v string) error { + switch v { + case `Archived`, `None`, `Production`, `Staging`: + *f = DeleteTransitionRequestStage(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Archived", "None", "Production", "Staging"`, v) + } +} + +// Type always returns DeleteTransitionRequestStage to satisfy [pflag.Value] interface +func (f *DeleteTransitionRequestStage) Type() string { + return "DeleteTransitionRequestStage" +} + +// Delete a webhook +type DeleteWebhookRequest struct { + // Webhook ID required to delete a registry webhook. + Id types.String `tfsdk:"-" url:"id,omitempty"` +} + +type DeleteWebhookResponse struct { +} + +type Experiment struct { + // Location where artifacts for the experiment are stored. + ArtifactLocation types.String `tfsdk:"artifact_location"` + // Creation time + CreationTime types.Int64 `tfsdk:"creation_time"` + // Unique identifier for the experiment. + ExperimentId types.String `tfsdk:"experiment_id"` + // Last update time + LastUpdateTime types.Int64 `tfsdk:"last_update_time"` + // Current life cycle stage of the experiment: "active" or "deleted". + // Deleted experiments are not returned by APIs. + LifecycleStage types.String `tfsdk:"lifecycle_stage"` + // Human readable name that identifies the experiment. + Name types.String `tfsdk:"name"` + // Tags: Additional metadata key-value pairs. + Tags []ExperimentTag `tfsdk:"tags"` +} + +type ExperimentAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel ExperimentPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ExperimentAccessControlResponse struct { + // All permissions. + AllPermissions []ExperimentPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. 
+ ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ExperimentPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel ExperimentPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type ExperimentPermissionLevel string + +const ExperimentPermissionLevelCanEdit ExperimentPermissionLevel = `CAN_EDIT` + +const ExperimentPermissionLevelCanManage ExperimentPermissionLevel = `CAN_MANAGE` + +const ExperimentPermissionLevelCanRead ExperimentPermissionLevel = `CAN_READ` + +// String representation for [fmt.Print] +func (f *ExperimentPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExperimentPermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_READ`: + *f = ExperimentPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_READ"`, v) + } +} + +// Type always returns ExperimentPermissionLevel to satisfy [pflag.Value] interface +func (f *ExperimentPermissionLevel) Type() string { + return "ExperimentPermissionLevel" +} + +type ExperimentPermissions struct { + AccessControlList []ExperimentAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type ExperimentPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel ExperimentPermissionLevel `tfsdk:"permission_level"` +} + +type ExperimentPermissionsRequest struct { + AccessControlList []ExperimentAccessControlRequest `tfsdk:"access_control_list"` + // The experiment for which to get or manage permissions. + ExperimentId types.String `tfsdk:"-" url:"-"` +} + +type ExperimentTag struct { + // The tag key. + Key types.String `tfsdk:"key"` + // The tag value. + Value types.String `tfsdk:"value"` +} + +type FileInfo struct { + // Size in bytes. Unset for directories. + FileSize types.Int64 `tfsdk:"file_size"` + // Whether the path is a directory. + IsDir types.Bool `tfsdk:"is_dir"` + // Path relative to the root artifact directory run. + Path types.String `tfsdk:"path"` +} + +// Get metadata +type GetByNameRequest struct { + // Name of the associated experiment. + ExperimentName types.String `tfsdk:"-" url:"experiment_name"` +} + +// Get experiment permission levels +type GetExperimentPermissionLevelsRequest struct { + // The experiment for which to get or manage permissions. + ExperimentId types.String `tfsdk:"-" url:"-"` +} + +type GetExperimentPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []ExperimentPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get experiment permissions +type GetExperimentPermissionsRequest struct { + // The experiment for which to get or manage permissions. + ExperimentId types.String `tfsdk:"-" url:"-"` +} + +// Get an experiment +type GetExperimentRequest struct { + // ID of the associated experiment. + ExperimentId types.String `tfsdk:"-" url:"experiment_id"` +} + +type GetExperimentResponse struct { + // Experiment details. 
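The experiment access-control types compose in the usual request shape: the experiment ID rides on the URL (tfsdk:"-" url:"-") while the access-control entries travel in the body. A minimal sketch granting a group read access, assuming it sits next to the generated types:

package ml_tf // illustrative sketch only

import "github.com/hashicorp/terraform-plugin-framework/types"

// exampleGrant gives a group read access to an experiment; the experiment ID
// itself is a path parameter and never enters terraform state.
func exampleGrant(experimentID string) ExperimentPermissionsRequest {
	return ExperimentPermissionsRequest{
		ExperimentId: types.StringValue(experimentID),
		AccessControlList: []ExperimentAccessControlRequest{
			{
				GroupName:       types.StringValue("data-scientists"),
				PermissionLevel: ExperimentPermissionLevelCanRead,
			},
		},
	}
}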
+ Experiment *Experiment `tfsdk:"experiment"` +} + +// Get history of a given metric within a run +type GetHistoryRequest struct { + // Maximum number of Metric records to return per paginated request. Default + // is set to 25,000. If set higher than 25,000, a request Exception will be + // raised. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Name of the metric. + MetricKey types.String `tfsdk:"-" url:"metric_key"` + // Token indicating the page of metric histories to fetch. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // ID of the run from which to fetch metric values. Must be provided. + RunId types.String `tfsdk:"-" url:"run_id,omitempty"` + // [Deprecated, use run_id instead] ID of the run from which to fetch metric + // values. This field will be removed in a future MLflow version. + RunUuid types.String `tfsdk:"-" url:"run_uuid,omitempty"` +} + +type GetLatestVersionsRequest struct { + // Registered model unique name identifier. + Name types.String `tfsdk:"name"` + // List of stages. + Stages []types.String `tfsdk:"stages"` +} + +type GetLatestVersionsResponse struct { + // Latest version models for each requests stage. Only return models with + // current `READY` status. If no `stages` provided, returns the latest + // version for each stage, including `"None"`. + ModelVersions []ModelVersion `tfsdk:"model_versions"` +} + +type GetMetricHistoryResponse struct { + // All logged values for this metric. + Metrics []Metric `tfsdk:"metrics"` + // Token that can be used to retrieve the next page of metric history + // results + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// Get model +type GetModelRequest struct { + // Registered model unique name identifier. + Name types.String `tfsdk:"-" url:"name"` +} + +type GetModelResponse struct { + RegisteredModelDatabricks *ModelDatabricks `tfsdk:"registered_model_databricks"` +} + +// Get a model version URI +type GetModelVersionDownloadUriRequest struct { + // Name of the registered model + Name types.String `tfsdk:"-" url:"name"` + // Model version number + Version types.String `tfsdk:"-" url:"version"` +} + +type GetModelVersionDownloadUriResponse struct { + // URI corresponding to where artifacts for this model version are stored. + ArtifactUri types.String `tfsdk:"artifact_uri"` +} + +// Get a model version +type GetModelVersionRequest struct { + // Name of the registered model + Name types.String `tfsdk:"-" url:"name"` + // Model version number + Version types.String `tfsdk:"-" url:"version"` +} + +type GetModelVersionResponse struct { + ModelVersion *ModelVersion `tfsdk:"model_version"` +} + +// Get registered model permission levels +type GetRegisteredModelPermissionLevelsRequest struct { + // The registered model for which to get or manage permissions. + RegisteredModelId types.String `tfsdk:"-" url:"-"` +} + +type GetRegisteredModelPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []RegisteredModelPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get registered model permissions +type GetRegisteredModelPermissionsRequest struct { + // The registered model for which to get or manage permissions. + RegisteredModelId types.String `tfsdk:"-" url:"-"` +} + +// Get a run +type GetRunRequest struct { + // ID of the run to fetch. Must be provided. + RunId types.String `tfsdk:"-" url:"run_id"` + // [Deprecated, use run_id instead] ID of the run to fetch. This field will + // be removed in a future MLflow version. 
+ RunUuid types.String `tfsdk:"-" url:"run_uuid,omitempty"` +} + +type GetRunResponse struct { + // Run metadata (name, start time, etc) and data (metrics, params, and + // tags). + Run *Run `tfsdk:"run"` +} + +type HttpUrlSpec struct { + // Value of the authorization header that should be sent in the request sent + // by the wehbook. It should be of the form `" "`. + // If set to an empty string, no authorization header will be included in + // the request. + Authorization types.String `tfsdk:"authorization"` + // Enable/disable SSL certificate validation. Default is true. For + // self-signed certificates, this field must be false AND the destination + // server must disable certificate validation as well. For security + // purposes, it is encouraged to perform secret validation with the + // HMAC-encoded portion of the payload and acknowledge the risk associated + // with disabling hostname validation whereby it becomes more likely that + // requests can be maliciously routed to an unintended host. + EnableSslVerification types.Bool `tfsdk:"enable_ssl_verification"` + // Shared secret required for HMAC encoding payload. The HMAC-encoded + // payload will be sent in the header as: { "X-Databricks-Signature": + // $encoded_payload }. + Secret types.String `tfsdk:"secret"` + // External HTTPS URL called on event trigger (by using a POST request). + Url types.String `tfsdk:"url"` +} + +type HttpUrlSpecWithoutSecret struct { + // Enable/disable SSL certificate validation. Default is true. For + // self-signed certificates, this field must be false AND the destination + // server must disable certificate validation as well. For security + // purposes, it is encouraged to perform secret validation with the + // HMAC-encoded portion of the payload and acknowledge the risk associated + // with disabling hostname validation whereby it becomes more likely that + // requests can be maliciously routed to an unintended host. + EnableSslVerification types.Bool `tfsdk:"enable_ssl_verification"` + // External HTTPS URL called on event trigger (by using a POST request). + Url types.String `tfsdk:"url"` +} + +type InputTag struct { + // The tag key. + Key types.String `tfsdk:"key"` + // The tag value. + Value types.String `tfsdk:"value"` +} + +type JobSpec struct { + // The personal access token used to authorize webhook's job runs. + AccessToken types.String `tfsdk:"access_token"` + // ID of the job that the webhook runs. + JobId types.String `tfsdk:"job_id"` + // URL of the workspace containing the job that this webhook runs. If not + // specified, the job’s workspace URL is assumed to be the same as the + // workspace where the webhook is created. + WorkspaceUrl types.String `tfsdk:"workspace_url"` +} + +type JobSpecWithoutSecret struct { + // ID of the job that the webhook runs. + JobId types.String `tfsdk:"job_id"` + // URL of the workspace containing the job that this webhook runs. Defaults + // to the workspace URL in which the webhook is created. If not specified, + // the job’s workspace is assumed to be the same as the webhook’s. + WorkspaceUrl types.String `tfsdk:"workspace_url"` +} + +// Get all artifacts +type ListArtifactsRequest struct { + // Token indicating the page of artifact results to fetch + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // Filter artifacts matching this path (a relative path from the root + // artifact directory). + Path types.String `tfsdk:"-" url:"path,omitempty"` + // ID of the run whose artifacts to list. Must be provided. 
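HttpUrlSpec's Secret powers the X-Databricks-Signature header mentioned in its documentation, so the receiving endpoint can authenticate deliveries. A hedged receiver-side sketch; the hash (SHA-256) and encoding (base64) are assumptions to confirm against the registry webhook documentation, not something this patch specifies.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"io"
	"net/http"
)

// verifySignature rejects webhook deliveries whose X-Databricks-Signature
// header does not match an HMAC of the raw body under the shared secret.
func verifySignature(secret string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		body, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "read error", http.StatusBadRequest)
			return
		}
		mac := hmac.New(sha256.New, []byte(secret)) // assumed hash
		mac.Write(body)
		expected := base64.StdEncoding.EncodeToString(mac.Sum(nil)) // assumed encoding
		if !hmac.Equal([]byte(expected), []byte(r.Header.Get("X-Databricks-Signature"))) {
			http.Error(w, "bad signature", http.StatusUnauthorized)
			return
		}
		w.WriteHeader(http.StatusOK)
	}
}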
+ RunId types.String `tfsdk:"-" url:"run_id,omitempty"` + // [Deprecated, use run_id instead] ID of the run whose artifacts to list. + // This field will be removed in a future MLflow version. + RunUuid types.String `tfsdk:"-" url:"run_uuid,omitempty"` +} + +type ListArtifactsResponse struct { + // File location and metadata for artifacts. + Files []FileInfo `tfsdk:"files"` + // Token that can be used to retrieve the next page of artifact results + NextPageToken types.String `tfsdk:"next_page_token"` + // Root artifact directory for the run. + RootUri types.String `tfsdk:"root_uri"` +} + +// List experiments +type ListExperimentsRequest struct { + // Maximum number of experiments desired. If `max_results` is unspecified, + // return all experiments. If `max_results` is too large, it'll be + // automatically capped at 1000. Callers of this endpoint are encouraged to + // pass max_results explicitly and leverage page_token to iterate through + // experiments. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Token indicating the page of experiments to fetch + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // Qualifier for type of experiments to be returned. If unspecified, return + // only active experiments. + ViewType types.String `tfsdk:"-" url:"view_type,omitempty"` +} + +type ListExperimentsResponse struct { + // Paginated Experiments beginning with the first item on the requested + // page. + Experiments []Experiment `tfsdk:"experiments"` + // Token that can be used to retrieve the next page of experiments. Empty + // token means no more experiment is available for retrieval. + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List models +type ListModelsRequest struct { + // Maximum number of registered models desired. Max threshold is 1000. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Pagination token to go to the next page based on a previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListModelsResponse struct { + // Pagination token to request next page of models for the same query. + NextPageToken types.String `tfsdk:"next_page_token"` + + RegisteredModels []Model `tfsdk:"registered_models"` +} + +type ListRegistryWebhooks struct { + // Token that can be used to retrieve the next page of artifact results + NextPageToken types.String `tfsdk:"next_page_token"` + // Array of registry webhooks. + Webhooks []RegistryWebhook `tfsdk:"webhooks"` +} + +// List transition requests +type ListTransitionRequestsRequest struct { + // Name of the model. + Name types.String `tfsdk:"-" url:"name"` + // Version of the model. + Version types.String `tfsdk:"-" url:"version"` +} + +type ListTransitionRequestsResponse struct { + // Array of open transition requests. + Requests []Activity `tfsdk:"requests"` +} + +// List registry webhooks +type ListWebhooksRequest struct { + // If `events` is specified, any webhook with one or more of the specified + // trigger events is included in the output. If `events` is not specified, + // webhooks of all event types are included in the output. + Events []RegistryWebhookEvent `tfsdk:"-" url:"events,omitempty"` + // If not specified, all webhooks associated with the specified events are + // listed, regardless of their associated model. 
+ ModelName types.String `tfsdk:"-" url:"model_name,omitempty"` + // Token indicating the page of artifact results to fetch + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type LogBatch struct { + // Metrics to log. A single request can contain up to 1000 metrics, and up + // to 1000 metrics, params, and tags in total. + Metrics []Metric `tfsdk:"metrics"` + // Params to log. A single request can contain up to 100 params, and up to + // 1000 metrics, params, and tags in total. + Params []Param `tfsdk:"params"` + // ID of the run to log under + RunId types.String `tfsdk:"run_id"` + // Tags to log. A single request can contain up to 100 tags, and up to 1000 + // metrics, params, and tags in total. + Tags []RunTag `tfsdk:"tags"` +} + +type LogBatchResponse struct { +} + +type LogInputs struct { + // Dataset inputs + Datasets []DatasetInput `tfsdk:"datasets"` + // ID of the run to log under + RunId types.String `tfsdk:"run_id"` +} + +type LogInputsResponse struct { +} + +type LogMetric struct { + // Name of the metric. + Key types.String `tfsdk:"key"` + // ID of the run under which to log the metric. Must be provided. + RunId types.String `tfsdk:"run_id"` + // [Deprecated, use run_id instead] ID of the run under which to log the + // metric. This field will be removed in a future MLflow version. + RunUuid types.String `tfsdk:"run_uuid"` + // Step at which to log the metric + Step types.Int64 `tfsdk:"step"` + // Unix timestamp in milliseconds at the time metric was logged. + Timestamp types.Int64 `tfsdk:"timestamp"` + // Double value of the metric being logged. + Value types.Float64 `tfsdk:"value"` +} + +type LogMetricResponse struct { +} + +type LogModel struct { + // MLmodel file in json format. + ModelJson types.String `tfsdk:"model_json"` + // ID of the run to log under + RunId types.String `tfsdk:"run_id"` +} + +type LogModelResponse struct { +} + +type LogParam struct { + // Name of the param. Maximum size is 255 bytes. + Key types.String `tfsdk:"key"` + // ID of the run under which to log the param. Must be provided. + RunId types.String `tfsdk:"run_id"` + // [Deprecated, use run_id instead] ID of the run under which to log the + // param. This field will be removed in a future MLflow version. + RunUuid types.String `tfsdk:"run_uuid"` + // String value of the param being logged. Maximum size is 500 bytes. + Value types.String `tfsdk:"value"` +} + +type LogParamResponse struct { +} + +type Metric struct { + // Key identifying this metric. + Key types.String `tfsdk:"key"` + // Step at which to log the metric. + Step types.Int64 `tfsdk:"step"` + // The timestamp at which this metric was recorded. + Timestamp types.Int64 `tfsdk:"timestamp"` + // Value associated with this metric. + Value types.Float64 `tfsdk:"value"` +} + +type Model struct { + // Timestamp recorded when this `registered_model` was created. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // Description of this `registered_model`. + Description types.String `tfsdk:"description"` + // Timestamp recorded when metadata for this `registered_model` was last + // updated. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // Collection of latest model versions for each stage. Only contains models + // with current `READY` status. + LatestVersions []ModelVersion `tfsdk:"latest_versions"` + // Unique name for the model. + Name types.String `tfsdk:"name"` + // Tags: Additional metadata key-value pairs for this `registered_model`. 
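LogBatch documents a hard ceiling of 1000 metrics per request (and 1000 metrics, params, and tags combined), so callers with large metric sets have to split them. A small sketch of that chunking, assuming it sits next to the generated types:

package ml_tf // illustrative sketch only

import "github.com/hashicorp/terraform-plugin-framework/types"

// chunkMetrics splits a large metric set into LogBatch payloads that respect
// the documented limit of 1000 metrics per request (which also covers the
// combined total when no params or tags are included).
func chunkMetrics(runID string, metrics []Metric) []LogBatch {
	const maxPerBatch = 1000
	var batches []LogBatch
	for start := 0; start < len(metrics); start += maxPerBatch {
		end := start + maxPerBatch
		if end > len(metrics) {
			end = len(metrics)
		}
		batches = append(batches, LogBatch{
			RunId:   types.StringValue(runID),
			Metrics: metrics[start:end],
		})
	}
	return batches
}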
+ Tags []ModelTag `tfsdk:"tags"` + // User that created this `registered_model` + UserId types.String `tfsdk:"user_id"` +} + +type ModelDatabricks struct { + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // User-specified description for the object. + Description types.String `tfsdk:"description"` + // Unique identifier for the object. + Id types.String `tfsdk:"id"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // Array of model versions, each the latest version for its stage. + LatestVersions []ModelVersion `tfsdk:"latest_versions"` + // Name of the model. + Name types.String `tfsdk:"name"` + // Permission level of the requesting user on the object. For what is + // allowed at each level, see [MLflow Model permissions](..). + PermissionLevel PermissionLevel `tfsdk:"permission_level"` + // Array of tags associated with the model. + Tags []ModelTag `tfsdk:"tags"` + // The username of the user that created the object. + UserId types.String `tfsdk:"user_id"` +} + +type ModelTag struct { + // The tag key. + Key types.String `tfsdk:"key"` + // The tag value. + Value types.String `tfsdk:"value"` +} + +type ModelVersion struct { + // Timestamp recorded when this `model_version` was created. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // Current stage for this `model_version`. + CurrentStage types.String `tfsdk:"current_stage"` + // Description of this `model_version`. + Description types.String `tfsdk:"description"` + // Timestamp recorded when metadata for this `model_version` was last + // updated. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // Unique name of the model + Name types.String `tfsdk:"name"` + // MLflow run ID used when creating `model_version`, if `source` was + // generated by an experiment run stored in MLflow tracking server. + RunId types.String `tfsdk:"run_id"` + // Run Link: Direct link to the run that generated this version + RunLink types.String `tfsdk:"run_link"` + // URI indicating the location of the source model artifacts, used when + // creating `model_version` + Source types.String `tfsdk:"source"` + // Current status of `model_version` + Status ModelVersionStatus `tfsdk:"status"` + // Details on current `status`, if it is pending or failed. + StatusMessage types.String `tfsdk:"status_message"` + // Tags: Additional metadata key-value pairs for this `model_version`. + Tags []ModelVersionTag `tfsdk:"tags"` + // User that created this `model_version`. + UserId types.String `tfsdk:"user_id"` + // Model's version number. + Version types.String `tfsdk:"version"` +} + +type ModelVersionDatabricks struct { + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // Stage of the model version. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + CurrentStage Stage `tfsdk:"current_stage"` + // User-specified description for the object. + Description types.String `tfsdk:"description"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // Name of the model. 
+ Name types.String `tfsdk:"name"` + // Permission level of the requesting user on the object. For what is + // allowed at each level, see [MLflow Model permissions](..). + PermissionLevel PermissionLevel `tfsdk:"permission_level"` + // Unique identifier for the MLflow tracking run associated with the source + // model artifacts. + RunId types.String `tfsdk:"run_id"` + // URL of the run associated with the model artifacts. This field is set at + // model version creation time only for model versions whose source run is + // from a tracking server that is different from the registry server. + RunLink types.String `tfsdk:"run_link"` + // URI that indicates the location of the source model artifacts. This is + // used when creating the model version. + Source types.String `tfsdk:"source"` + // The status of the model version. Valid values are: * + // `PENDING_REGISTRATION`: Request to register a new model version is + // pending as server performs background tasks. + // + // * `FAILED_REGISTRATION`: Request to register a new model version has + // failed. + // + // * `READY`: Model version is ready for use. + Status Status `tfsdk:"status"` + // Details on the current status, for example why registration failed. + StatusMessage types.String `tfsdk:"status_message"` + // Array of tags that are associated with the model version. + Tags []ModelVersionTag `tfsdk:"tags"` + // The username of the user that created the object. + UserId types.String `tfsdk:"user_id"` + // Version of the model. + Version types.String `tfsdk:"version"` +} + +// Current status of `model_version` +type ModelVersionStatus string + +const ModelVersionStatusFailedRegistration ModelVersionStatus = `FAILED_REGISTRATION` + +const ModelVersionStatusPendingRegistration ModelVersionStatus = `PENDING_REGISTRATION` + +const ModelVersionStatusReady ModelVersionStatus = `READY` + +// String representation for [fmt.Print] +func (f *ModelVersionStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ModelVersionStatus) Set(v string) error { + switch v { + case `FAILED_REGISTRATION`, `PENDING_REGISTRATION`, `READY`: + *f = ModelVersionStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED_REGISTRATION", "PENDING_REGISTRATION", "READY"`, v) + } +} + +// Type always returns ModelVersionStatus to satisfy [pflag.Value] interface +func (f *ModelVersionStatus) Type() string { + return "ModelVersionStatus" +} + +type ModelVersionTag struct { + // The tag key. + Key types.String `tfsdk:"key"` + // The tag value. + Value types.String `tfsdk:"value"` +} + +type Param struct { + // Key identifying this param. + Key types.String `tfsdk:"key"` + // Value associated with this param. + Value types.String `tfsdk:"value"` +} + +// Permission level of the requesting user on the object. For what is allowed at +// each level, see [MLflow Model permissions](..). 
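An aside for reviewers: every enum in these generated files carries the same String/Set/Type trio so it satisfies the pflag.Value interface, with Set doing the validation against the declared constants. A minimal usage sketch of the ModelVersionStatus helpers defined above (illustrative only, not part of the generated file; it assumes it compiles alongside the ml_tf package and that fmt is imported):

// Sketch: Set accepts only the declared constants and rejects anything else.
func exampleModelVersionStatus() {
	var s ModelVersionStatus
	if err := s.Set("READY"); err == nil {
		fmt.Println(s.String(), s.Type()) // prints: READY ModelVersionStatus
	}
	if err := s.Set("ARCHIVED"); err != nil {
		fmt.Println(err) // value "ARCHIVED" is not one of "FAILED_REGISTRATION", "PENDING_REGISTRATION", "READY"
	}
}

The PermissionLevel enum declared next, and every other enum below, follows the identical pattern.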
+type PermissionLevel string + +const PermissionLevelCanEdit PermissionLevel = `CAN_EDIT` + +const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE` + +const PermissionLevelCanManageProductionVersions PermissionLevel = `CAN_MANAGE_PRODUCTION_VERSIONS` + +const PermissionLevelCanManageStagingVersions PermissionLevel = `CAN_MANAGE_STAGING_VERSIONS` + +const PermissionLevelCanRead PermissionLevel = `CAN_READ` + +// String representation for [fmt.Print] +func (f *PermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_MANAGE_PRODUCTION_VERSIONS`, `CAN_MANAGE_STAGING_VERSIONS`, `CAN_READ`: + *f = PermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE_STAGING_VERSIONS", "CAN_READ"`, v) + } +} + +// Type always returns PermissionLevel to satisfy [pflag.Value] interface +func (f *PermissionLevel) Type() string { + return "PermissionLevel" +} + +type RegisteredModelAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel RegisteredModelPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type RegisteredModelAccessControlResponse struct { + // All permissions. + AllPermissions []RegisteredModelPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. 
+ ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type RegisteredModelPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel RegisteredModelPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type RegisteredModelPermissionLevel string + +const RegisteredModelPermissionLevelCanEdit RegisteredModelPermissionLevel = `CAN_EDIT` + +const RegisteredModelPermissionLevelCanManage RegisteredModelPermissionLevel = `CAN_MANAGE` + +const RegisteredModelPermissionLevelCanManageProductionVersions RegisteredModelPermissionLevel = `CAN_MANAGE_PRODUCTION_VERSIONS` + +const RegisteredModelPermissionLevelCanManageStagingVersions RegisteredModelPermissionLevel = `CAN_MANAGE_STAGING_VERSIONS` + +const RegisteredModelPermissionLevelCanRead RegisteredModelPermissionLevel = `CAN_READ` + +// String representation for [fmt.Print] +func (f *RegisteredModelPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RegisteredModelPermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_MANAGE_PRODUCTION_VERSIONS`, `CAN_MANAGE_STAGING_VERSIONS`, `CAN_READ`: + *f = RegisteredModelPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE_STAGING_VERSIONS", "CAN_READ"`, v) + } +} + +// Type always returns RegisteredModelPermissionLevel to satisfy [pflag.Value] interface +func (f *RegisteredModelPermissionLevel) Type() string { + return "RegisteredModelPermissionLevel" +} + +type RegisteredModelPermissions struct { + AccessControlList []RegisteredModelAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type RegisteredModelPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel RegisteredModelPermissionLevel `tfsdk:"permission_level"` +} + +type RegisteredModelPermissionsRequest struct { + AccessControlList []RegisteredModelAccessControlRequest `tfsdk:"access_control_list"` + // The registered model for which to get or manage permissions. + RegisteredModelId types.String `tfsdk:"-" url:"-"` +} + +type RegistryWebhook struct { + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // User-specified description for the webhook. + Description types.String `tfsdk:"description"` + // Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A + // new model version was created for the associated model. + // + // * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was + // changed. + // + // * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s + // stage be transitioned. + // + // * `COMMENT_CREATED`: A user wrote a comment on a registered model. + // + // * `REGISTERED_MODEL_CREATED`: A new registered model was created. This + // event type can only be specified for a registry-wide webhook, which can + // be created by not specifying a model name in the create request. + // + // * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version. 
+ // + // * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was + // transitioned to staging. + // + // * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was + // transitioned to production. + // + // * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived. + // + // * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model + // version be transitioned to staging. + // + // * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model + // version be transitioned to production. + // + // * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model + // version be archived. + Events []RegistryWebhookEvent `tfsdk:"events"` + + HttpUrlSpec *HttpUrlSpecWithoutSecret `tfsdk:"http_url_spec"` + // Webhook ID + Id types.String `tfsdk:"id"` + + JobSpec *JobSpecWithoutSecret `tfsdk:"job_spec"` + // Time of the object at last update, as a Unix timestamp in milliseconds. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // Name of the model whose events would trigger this webhook. + ModelName types.String `tfsdk:"model_name"` + // Enable or disable triggering the webhook, or put the webhook into test + // mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an + // associated event happens. + // + // * `DISABLED`: Webhook is not triggered. + // + // * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is + // not triggered on a real event. + Status RegistryWebhookStatus `tfsdk:"status"` +} + +type RegistryWebhookEvent string + +const RegistryWebhookEventCommentCreated RegistryWebhookEvent = `COMMENT_CREATED` + +const RegistryWebhookEventModelVersionCreated RegistryWebhookEvent = `MODEL_VERSION_CREATED` + +const RegistryWebhookEventModelVersionTagSet RegistryWebhookEvent = `MODEL_VERSION_TAG_SET` + +const RegistryWebhookEventModelVersionTransitionedStage RegistryWebhookEvent = `MODEL_VERSION_TRANSITIONED_STAGE` + +const RegistryWebhookEventModelVersionTransitionedToArchived RegistryWebhookEvent = `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED` + +const RegistryWebhookEventModelVersionTransitionedToProduction RegistryWebhookEvent = `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION` + +const RegistryWebhookEventModelVersionTransitionedToStaging RegistryWebhookEvent = `MODEL_VERSION_TRANSITIONED_TO_STAGING` + +const RegistryWebhookEventRegisteredModelCreated RegistryWebhookEvent = `REGISTERED_MODEL_CREATED` + +const RegistryWebhookEventTransitionRequestCreated RegistryWebhookEvent = `TRANSITION_REQUEST_CREATED` + +const RegistryWebhookEventTransitionRequestToArchivedCreated RegistryWebhookEvent = `TRANSITION_REQUEST_TO_ARCHIVED_CREATED` + +const RegistryWebhookEventTransitionRequestToProductionCreated RegistryWebhookEvent = `TRANSITION_REQUEST_TO_PRODUCTION_CREATED` + +const RegistryWebhookEventTransitionRequestToStagingCreated RegistryWebhookEvent = `TRANSITION_REQUEST_TO_STAGING_CREATED` + +// String representation for [fmt.Print] +func (f *RegistryWebhookEvent) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RegistryWebhookEvent) Set(v string) error { + switch v { + case `COMMENT_CREATED`, `MODEL_VERSION_CREATED`, `MODEL_VERSION_TAG_SET`, `MODEL_VERSION_TRANSITIONED_STAGE`, `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`, `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`, `MODEL_VERSION_TRANSITIONED_TO_STAGING`, `REGISTERED_MODEL_CREATED`, `TRANSITION_REQUEST_CREATED`, `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`, 
`TRANSITION_REQUEST_TO_PRODUCTION_CREATED`, `TRANSITION_REQUEST_TO_STAGING_CREATED`: + *f = RegistryWebhookEvent(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COMMENT_CREATED", "MODEL_VERSION_CREATED", "MODEL_VERSION_TAG_SET", "MODEL_VERSION_TRANSITIONED_STAGE", "MODEL_VERSION_TRANSITIONED_TO_ARCHIVED", "MODEL_VERSION_TRANSITIONED_TO_PRODUCTION", "MODEL_VERSION_TRANSITIONED_TO_STAGING", "REGISTERED_MODEL_CREATED", "TRANSITION_REQUEST_CREATED", "TRANSITION_REQUEST_TO_ARCHIVED_CREATED", "TRANSITION_REQUEST_TO_PRODUCTION_CREATED", "TRANSITION_REQUEST_TO_STAGING_CREATED"`, v) + } +} + +// Type always returns RegistryWebhookEvent to satisfy [pflag.Value] interface +func (f *RegistryWebhookEvent) Type() string { + return "RegistryWebhookEvent" +} + +// Enable or disable triggering the webhook, or put the webhook into test mode. +// The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an associated +// event happens. +// +// * `DISABLED`: Webhook is not triggered. +// +// * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is not +// triggered on a real event. +type RegistryWebhookStatus string + +// Webhook is triggered when an associated event happens. +const RegistryWebhookStatusActive RegistryWebhookStatus = `ACTIVE` + +// Webhook is not triggered. +const RegistryWebhookStatusDisabled RegistryWebhookStatus = `DISABLED` + +// Webhook can be triggered through the test endpoint, but is not triggered on a +// real event. +const RegistryWebhookStatusTestMode RegistryWebhookStatus = `TEST_MODE` + +// String representation for [fmt.Print] +func (f *RegistryWebhookStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RegistryWebhookStatus) Set(v string) error { + switch v { + case `ACTIVE`, `DISABLED`, `TEST_MODE`: + *f = RegistryWebhookStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "DISABLED", "TEST_MODE"`, v) + } +} + +// Type always returns RegistryWebhookStatus to satisfy [pflag.Value] interface +func (f *RegistryWebhookStatus) Type() string { + return "RegistryWebhookStatus" +} + +type RejectTransitionRequest struct { + // User-provided comment on the action. + Comment types.String `tfsdk:"comment"` + // Name of the model. + Name types.String `tfsdk:"name"` + // Target stage of the transition. Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage Stage `tfsdk:"stage"` + // Version of the model. + Version types.String `tfsdk:"version"` +} + +type RejectTransitionRequestResponse struct { + // Activity recorded for the action. + Activity *Activity `tfsdk:"activity"` +} + +type RenameModelRequest struct { + // Registered model unique name identifier. + Name types.String `tfsdk:"name"` + // If provided, updates the name for this `registered_model`. + NewName types.String `tfsdk:"new_name"` +} + +type RenameModelResponse struct { + RegisteredModel *Model `tfsdk:"registered_model"` +} + +type RestoreExperiment struct { + // ID of the associated experiment. + ExperimentId types.String `tfsdk:"experiment_id"` +} + +type RestoreExperimentResponse struct { +} + +type RestoreRun struct { + // ID of the run to restore. 
+ RunId types.String `tfsdk:"run_id"` +} + +type RestoreRunResponse struct { +} + +type RestoreRuns struct { + // The ID of the experiment containing the runs to restore. + ExperimentId types.String `tfsdk:"experiment_id"` + // An optional positive integer indicating the maximum number of runs to + // restore. The maximum allowed value for max_runs is 10000. + MaxRuns types.Int64 `tfsdk:"max_runs"` + // The minimum deletion timestamp in milliseconds since the UNIX epoch for + // restoring runs. Only runs deleted no earlier than this timestamp are + // restored. + MinTimestampMillis types.Int64 `tfsdk:"min_timestamp_millis"` +} + +type RestoreRunsResponse struct { + // The number of runs restored. + RunsRestored types.Int64 `tfsdk:"runs_restored"` +} + +type Run struct { + // Run data. + Data *RunData `tfsdk:"data"` + // Run metadata. + Info *RunInfo `tfsdk:"info"` + // Run inputs. + Inputs *RunInputs `tfsdk:"inputs"` +} + +type RunData struct { + // Run metrics. + Metrics []Metric `tfsdk:"metrics"` + // Run parameters. + Params []Param `tfsdk:"params"` + // Additional metadata key-value pairs. + Tags []RunTag `tfsdk:"tags"` +} + +type RunInfo struct { + // URI of the directory where artifacts should be uploaded. This can be a + // local path (starting with "/"), or a distributed file system (DFS) path, + // like `s3://bucket/directory` or `dbfs:/my/directory`. If not set, the + // local `./mlruns` directory is chosen. + ArtifactUri types.String `tfsdk:"artifact_uri"` + // Unix timestamp of when the run ended in milliseconds. + EndTime types.Int64 `tfsdk:"end_time"` + // The experiment ID. + ExperimentId types.String `tfsdk:"experiment_id"` + // Current life cycle stage of the experiment : OneOf("active", "deleted") + LifecycleStage types.String `tfsdk:"lifecycle_stage"` + // Unique identifier for the run. + RunId types.String `tfsdk:"run_id"` + // [Deprecated, use run_id instead] Unique identifier for the run. This + // field will be removed in a future MLflow version. + RunUuid types.String `tfsdk:"run_uuid"` + // Unix timestamp of when the run started in milliseconds. + StartTime types.Int64 `tfsdk:"start_time"` + // Current status of the run. + Status RunInfoStatus `tfsdk:"status"` + // User who initiated the run. This field is deprecated as of MLflow 1.0, + // and will be removed in a future MLflow release. Use 'mlflow.user' tag + // instead. + UserId types.String `tfsdk:"user_id"` +} + +// Current status of the run. +type RunInfoStatus string + +const RunInfoStatusFailed RunInfoStatus = `FAILED` + +const RunInfoStatusFinished RunInfoStatus = `FINISHED` + +const RunInfoStatusKilled RunInfoStatus = `KILLED` + +const RunInfoStatusRunning RunInfoStatus = `RUNNING` + +const RunInfoStatusScheduled RunInfoStatus = `SCHEDULED` + +// String representation for [fmt.Print] +func (f *RunInfoStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunInfoStatus) Set(v string) error { + switch v { + case `FAILED`, `FINISHED`, `KILLED`, `RUNNING`, `SCHEDULED`: + *f = RunInfoStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED", "FINISHED", "KILLED", "RUNNING", "SCHEDULED"`, v) + } +} + +// Type always returns RunInfoStatus to satisfy [pflag.Value] interface +func (f *RunInfoStatus) Type() string { + return "RunInfoStatus" +} + +type RunInputs struct { + // Run metrics. + DatasetInputs []DatasetInput `tfsdk:"dataset_inputs"` +} + +type RunTag struct { + // The tag key. 
+ Key types.String `tfsdk:"key"` + // The tag value. + Value types.String `tfsdk:"value"` +} + +type SearchExperiments struct { + // String representing a SQL filter condition (e.g. "name ILIKE + // 'my-experiment%'") + Filter types.String `tfsdk:"filter"` + // Maximum number of experiments desired. Max threshold is 3000. + MaxResults types.Int64 `tfsdk:"max_results"` + // List of columns for ordering search results, which can include experiment + // name and last updated timestamp with an optional "DESC" or "ASC" + // annotation, where "ASC" is the default. Tiebreaks are done by experiment + // id DESC. + OrderBy []types.String `tfsdk:"order_by"` + // Token indicating the page of experiments to fetch + PageToken types.String `tfsdk:"page_token"` + // Qualifier for type of experiments to be returned. If unspecified, return + // only active experiments. + ViewType SearchExperimentsViewType `tfsdk:"view_type"` +} + +type SearchExperimentsResponse struct { + // Experiments that match the search criteria + Experiments []Experiment `tfsdk:"experiments"` + // Token that can be used to retrieve the next page of experiments. An empty + // token means that no more experiments are available for retrieval. + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// Qualifier for type of experiments to be returned. If unspecified, return only +// active experiments. +type SearchExperimentsViewType string + +const SearchExperimentsViewTypeActiveOnly SearchExperimentsViewType = `ACTIVE_ONLY` + +const SearchExperimentsViewTypeAll SearchExperimentsViewType = `ALL` + +const SearchExperimentsViewTypeDeletedOnly SearchExperimentsViewType = `DELETED_ONLY` + +// String representation for [fmt.Print] +func (f *SearchExperimentsViewType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SearchExperimentsViewType) Set(v string) error { + switch v { + case `ACTIVE_ONLY`, `ALL`, `DELETED_ONLY`: + *f = SearchExperimentsViewType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE_ONLY", "ALL", "DELETED_ONLY"`, v) + } +} + +// Type always returns SearchExperimentsViewType to satisfy [pflag.Value] interface +func (f *SearchExperimentsViewType) Type() string { + return "SearchExperimentsViewType" +} + +// Searches model versions +type SearchModelVersionsRequest struct { + // String filter condition, like "name='my-model-name'". Must be a single + // boolean condition, with string values wrapped in single quotes. + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Maximum number of models desired. Max threshold is 10K. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // List of columns to be ordered by including model name, version, stage + // with an optional "DESC" or "ASC" annotation, where "ASC" is the default. + // Tiebreaks are done by latest stage transition timestamp, followed by name + // ASC, followed by version DESC. + OrderBy []types.String `tfsdk:"-" url:"order_by,omitempty"` + // Pagination token to go to next page based on previous search query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type SearchModelVersionsResponse struct { + // Models that match the search criteria + ModelVersions []ModelVersion `tfsdk:"model_versions"` + // Pagination token to request next page of models for the same search + // query. 
+ NextPageToken types.String `tfsdk:"next_page_token"` +} + +// Search models +type SearchModelsRequest struct { + // String filter condition, like "name LIKE 'my-model-name'". Interpreted in + // the backend automatically as "name LIKE '%my-model-name%'". Single + // boolean condition, with string values wrapped in single quotes. + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Maximum number of models desired. Default is 100. Max threshold is 1000. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // List of columns for ordering search results, which can include model name + // and last updated timestamp with an optional "DESC" or "ASC" annotation, + // where "ASC" is the default. Tiebreaks are done by model name ASC. + OrderBy []types.String `tfsdk:"-" url:"order_by,omitempty"` + // Pagination token to go to the next page based on a previous search query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type SearchModelsResponse struct { + // Pagination token to request the next page of models. + NextPageToken types.String `tfsdk:"next_page_token"` + // Registered Models that match the search criteria. + RegisteredModels []Model `tfsdk:"registered_models"` +} + +type SearchRuns struct { + // List of experiment IDs to search over. + ExperimentIds []types.String `tfsdk:"experiment_ids"` + // A filter expression over params, metrics, and tags, that allows returning + // a subset of runs. The syntax is a subset of SQL that supports ANDing + // together binary operations between a param, metric, or tag and a + // constant. + // + // Example: `metrics.rmse < 1 and params.model_class = 'LogisticRegression'` + // + // You can select columns with special characters (hyphen, space, period, + // etc.) by using double quotes: `metrics."model class" = 'LinearRegression' + // and tags."user-name" = 'Tomas'` + // + // Supported operators are `=`, `!=`, `>`, `>=`, `<`, and `<=`. + Filter types.String `tfsdk:"filter"` + // Maximum number of runs desired. Max threshold is 50000 + MaxResults types.Int64 `tfsdk:"max_results"` + // List of columns to be ordered by, including attributes, params, metrics, + // and tags with an optional "DESC" or "ASC" annotation, where "ASC" is the + // default. Example: ["params.input DESC", "metrics.alpha ASC", + // "metrics.rmse"] Tiebreaks are done by start_time DESC followed by run_id + // for runs with the same start time (and this is the default ordering + // criterion if order_by is not provided). + OrderBy []types.String `tfsdk:"order_by"` + // Token for the current page of runs. + PageToken types.String `tfsdk:"page_token"` + // Whether to display only active, only deleted, or all runs. Defaults to + // only active runs. + RunViewType SearchRunsRunViewType `tfsdk:"run_view_type"` +} + +type SearchRunsResponse struct { + // Token for the next page of runs. + NextPageToken types.String `tfsdk:"next_page_token"` + // Runs that match the search criteria. + Runs []Run `tfsdk:"runs"` +} + +// Whether to display only active, only deleted, or all runs. Defaults to only +// active runs. 
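The filter and order_by syntax documented on SearchRuns above maps directly onto these tfsdk-typed fields. A short sketch of populating the request (illustrative only; the experiment ID is a made-up value, and types.StringValue/types.Int64Value are the terraform-plugin-framework constructors):

// Sketch: building a SearchRuns request using the documented filter syntax.
func exampleSearchRuns() SearchRuns {
	return SearchRuns{
		ExperimentIds: []types.String{types.StringValue("1234")}, // hypothetical experiment ID
		Filter:        types.StringValue(`metrics.rmse < 1 and params.model_class = 'LogisticRegression'`),
		MaxResults:    types.Int64Value(1000),
		OrderBy:       []types.String{types.StringValue("metrics.rmse")},
		RunViewType:   SearchRunsRunViewTypeActiveOnly, // constant declared just below
	}
}

The SearchRunsRunViewType enum that follows supplies the allowed values for run_view_type.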
+type SearchRunsRunViewType string + +const SearchRunsRunViewTypeActiveOnly SearchRunsRunViewType = `ACTIVE_ONLY` + +const SearchRunsRunViewTypeAll SearchRunsRunViewType = `ALL` + +const SearchRunsRunViewTypeDeletedOnly SearchRunsRunViewType = `DELETED_ONLY` + +// String representation for [fmt.Print] +func (f *SearchRunsRunViewType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SearchRunsRunViewType) Set(v string) error { + switch v { + case `ACTIVE_ONLY`, `ALL`, `DELETED_ONLY`: + *f = SearchRunsRunViewType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE_ONLY", "ALL", "DELETED_ONLY"`, v) + } +} + +// Type always returns SearchRunsRunViewType to satisfy [pflag.Value] interface +func (f *SearchRunsRunViewType) Type() string { + return "SearchRunsRunViewType" +} + +type SetExperimentTag struct { + // ID of the experiment under which to log the tag. Must be provided. + ExperimentId types.String `tfsdk:"experiment_id"` + // Name of the tag. Maximum size depends on storage backend. All storage + // backends are guaranteed to support key values up to 250 bytes in size. + Key types.String `tfsdk:"key"` + // String value of the tag being logged. Maximum size depends on storage + // backend. All storage backends are guaranteed to support key values up to + // 5000 bytes in size. + Value types.String `tfsdk:"value"` +} + +type SetExperimentTagResponse struct { +} + +type SetModelTagRequest struct { + // Name of the tag. Maximum size depends on storage backend. If a tag with + // this name already exists, its preexisting value will be replaced by the + // specified `value`. All storage backends are guaranteed to support key + // values up to 250 bytes in size. + Key types.String `tfsdk:"key"` + // Unique name of the model. + Name types.String `tfsdk:"name"` + // String value of the tag being logged. Maximum size depends on storage + // backend. All storage backends are guaranteed to support key values up to + // 5000 bytes in size. + Value types.String `tfsdk:"value"` +} + +type SetModelTagResponse struct { +} + +type SetModelVersionTagRequest struct { + // Name of the tag. Maximum size depends on storage backend. If a tag with + // this name already exists, its preexisting value will be replaced by the + // specified `value`. All storage backends are guaranteed to support key + // values up to 250 bytes in size. + Key types.String `tfsdk:"key"` + // Unique name of the model. + Name types.String `tfsdk:"name"` + // String value of the tag being logged. Maximum size depends on storage + // backend. All storage backends are guaranteed to support key values up to + // 5000 bytes in size. + Value types.String `tfsdk:"value"` + // Model version number. + Version types.String `tfsdk:"version"` +} + +type SetModelVersionTagResponse struct { +} + +type SetTag struct { + // Name of the tag. Maximum size depends on storage backend. All storage + // backends are guaranteed to support key values up to 250 bytes in size. + Key types.String `tfsdk:"key"` + // ID of the run under which to log the tag. Must be provided. + RunId types.String `tfsdk:"run_id"` + // [Deprecated, use run_id instead] ID of the run under which to log the + // tag. This field will be removed in a future MLflow version. + RunUuid types.String `tfsdk:"run_uuid"` + // String value of the tag being logged. Maximum size depends on storage + // backend. All storage backends are guaranteed to support key values up to + // 5000 bytes in size. 
+ Value types.String `tfsdk:"value"` +} + +type SetTagResponse struct { +} + +// Stage of the model version. Valid values are: +// +// * `None`: The initial stage of a model version. +// +// * `Staging`: Staging or pre-production stage. +// +// * `Production`: Production stage. +// +// * `Archived`: Archived stage. +type Stage string + +// Archived stage. +const StageArchived Stage = `Archived` + +// The initial stage of a model version. +const StageNone Stage = `None` + +// Production stage. +const StageProduction Stage = `Production` + +// Staging or pre-production stage. +const StageStaging Stage = `Staging` + +// String representation for [fmt.Print] +func (f *Stage) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Stage) Set(v string) error { + switch v { + case `Archived`, `None`, `Production`, `Staging`: + *f = Stage(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Archived", "None", "Production", "Staging"`, v) + } +} + +// Type always returns Stage to satisfy [pflag.Value] interface +func (f *Stage) Type() string { + return "Stage" +} + +// The status of the model version. Valid values are: * `PENDING_REGISTRATION`: +// Request to register a new model version is pending as server performs +// background tasks. +// +// * `FAILED_REGISTRATION`: Request to register a new model version has failed. +// +// * `READY`: Model version is ready for use. +type Status string + +// Request to register a new model version has failed. +const StatusFailedRegistration Status = `FAILED_REGISTRATION` + +// Request to register a new model version is pending as server performs +// background tasks. +const StatusPendingRegistration Status = `PENDING_REGISTRATION` + +// Model version is ready for use. +const StatusReady Status = `READY` + +// String representation for [fmt.Print] +func (f *Status) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Status) Set(v string) error { + switch v { + case `FAILED_REGISTRATION`, `PENDING_REGISTRATION`, `READY`: + *f = Status(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED_REGISTRATION", "PENDING_REGISTRATION", "READY"`, v) + } +} + +// Type always returns Status to satisfy [pflag.Value] interface +func (f *Status) Type() string { + return "Status" +} + +// Test webhook response object. +type TestRegistryWebhook struct { + // Body of the response from the webhook URL + Body types.String `tfsdk:"body"` + // Status code returned by the webhook URL + StatusCode types.Int64 `tfsdk:"status_code"` +} + +type TestRegistryWebhookRequest struct { + // If `event` is specified, the test trigger uses the specified event. If + // `event` is not specified, the test trigger uses a randomly chosen event + // associated with the webhook. + Event RegistryWebhookEvent `tfsdk:"event"` + // Webhook ID + Id types.String `tfsdk:"id"` +} + +type TestRegistryWebhookResponse struct { + // Test webhook response object. + Webhook *TestRegistryWebhook `tfsdk:"webhook"` +} + +type TransitionModelVersionStageDatabricks struct { + // Specifies whether to archive all current model versions in the target + // stage. + ArchiveExistingVersions types.Bool `tfsdk:"archive_existing_versions"` + // User-provided comment on the action. + Comment types.String `tfsdk:"comment"` + // Name of the model. + Name types.String `tfsdk:"name"` + // Target stage of the transition. 
Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + Stage Stage `tfsdk:"stage"` + // Version of the model. + Version types.String `tfsdk:"version"` +} + +// Transition request details. +type TransitionRequest struct { + // Array of actions on the activity allowed for the current viewer. + AvailableActions []ActivityAction `tfsdk:"available_actions"` + // User-provided comment associated with the transition request. + Comment types.String `tfsdk:"comment"` + // Creation time of the object, as a Unix timestamp in milliseconds. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // Target stage of the transition (if the activity is stage transition + // related). Valid values are: + // + // * `None`: The initial stage of a model version. + // + // * `Staging`: Staging or pre-production stage. + // + // * `Production`: Production stage. + // + // * `Archived`: Archived stage. + ToStage Stage `tfsdk:"to_stage"` + // The username of the user that created the object. + UserId types.String `tfsdk:"user_id"` +} + +type TransitionStageResponse struct { + ModelVersion *ModelVersionDatabricks `tfsdk:"model_version"` +} + +type UpdateComment struct { + // User-provided comment on the action. + Comment types.String `tfsdk:"comment"` + // Unique identifier of an activity + Id types.String `tfsdk:"id"` +} + +type UpdateCommentResponse struct { + // Comment details. + Comment *CommentObject `tfsdk:"comment"` +} + +type UpdateExperiment struct { + // ID of the associated experiment. + ExperimentId types.String `tfsdk:"experiment_id"` + // If provided, the experiment's name is changed to the new name. The new + // name must be unique. + NewName types.String `tfsdk:"new_name"` +} + +type UpdateExperimentResponse struct { +} + +type UpdateModelRequest struct { + // If provided, updates the description for this `registered_model`. + Description types.String `tfsdk:"description"` + // Registered model unique name identifier. + Name types.String `tfsdk:"name"` +} + +type UpdateModelResponse struct { +} + +type UpdateModelVersionRequest struct { + // If provided, updates the description for this `registered_model`. + Description types.String `tfsdk:"description"` + // Name of the registered model + Name types.String `tfsdk:"name"` + // Model version number + Version types.String `tfsdk:"version"` +} + +type UpdateModelVersionResponse struct { +} + +type UpdateRegistryWebhook struct { + // User-specified description for the webhook. + Description types.String `tfsdk:"description"` + // Events that can trigger a registry webhook: * `MODEL_VERSION_CREATED`: A + // new model version was created for the associated model. + // + // * `MODEL_VERSION_TRANSITIONED_STAGE`: A model version’s stage was + // changed. + // + // * `TRANSITION_REQUEST_CREATED`: A user requested a model version’s + // stage be transitioned. + // + // * `COMMENT_CREATED`: A user wrote a comment on a registered model. + // + // * `REGISTERED_MODEL_CREATED`: A new registered model was created. This + // event type can only be specified for a registry-wide webhook, which can + // be created by not specifying a model name in the create request. + // + // * `MODEL_VERSION_TAG_SET`: A user set a tag on the model version. + // + // * `MODEL_VERSION_TRANSITIONED_TO_STAGING`: A model version was + // transitioned to staging. 
+ // + // * `MODEL_VERSION_TRANSITIONED_TO_PRODUCTION`: A model version was + // transitioned to production. + // + // * `MODEL_VERSION_TRANSITIONED_TO_ARCHIVED`: A model version was archived. + // + // * `TRANSITION_REQUEST_TO_STAGING_CREATED`: A user requested a model + // version be transitioned to staging. + // + // * `TRANSITION_REQUEST_TO_PRODUCTION_CREATED`: A user requested a model + // version be transitioned to production. + // + // * `TRANSITION_REQUEST_TO_ARCHIVED_CREATED`: A user requested a model + // version be archived. + Events []RegistryWebhookEvent `tfsdk:"events"` + + HttpUrlSpec *HttpUrlSpec `tfsdk:"http_url_spec"` + // Webhook ID + Id types.String `tfsdk:"id"` + + JobSpec *JobSpec `tfsdk:"job_spec"` + // Enable or disable triggering the webhook, or put the webhook into test + // mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an + // associated event happens. + // + // * `DISABLED`: Webhook is not triggered. + // + // * `TEST_MODE`: Webhook can be triggered through the test endpoint, but is + // not triggered on a real event. + Status RegistryWebhookStatus `tfsdk:"status"` +} + +type UpdateRun struct { + // Unix timestamp in milliseconds of when the run ended. + EndTime types.Int64 `tfsdk:"end_time"` + // ID of the run to update. Must be provided. + RunId types.String `tfsdk:"run_id"` + // [Deprecated, use run_id instead] ID of the run to update.. This field + // will be removed in a future MLflow version. + RunUuid types.String `tfsdk:"run_uuid"` + // Updated status of the run. + Status UpdateRunStatus `tfsdk:"status"` +} + +type UpdateRunResponse struct { + // Updated metadata of the run. + RunInfo *RunInfo `tfsdk:"run_info"` +} + +// Updated status of the run. +type UpdateRunStatus string + +const UpdateRunStatusFailed UpdateRunStatus = `FAILED` + +const UpdateRunStatusFinished UpdateRunStatus = `FINISHED` + +const UpdateRunStatusKilled UpdateRunStatus = `KILLED` + +const UpdateRunStatusRunning UpdateRunStatus = `RUNNING` + +const UpdateRunStatusScheduled UpdateRunStatus = `SCHEDULED` + +// String representation for [fmt.Print] +func (f *UpdateRunStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateRunStatus) Set(v string) error { + switch v { + case `FAILED`, `FINISHED`, `KILLED`, `RUNNING`, `SCHEDULED`: + *f = UpdateRunStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED", "FINISHED", "KILLED", "RUNNING", "SCHEDULED"`, v) + } +} + +// Type always returns UpdateRunStatus to satisfy [pflag.Value] interface +func (f *UpdateRunStatus) Type() string { + return "UpdateRunStatus" +} + +type UpdateWebhookResponse struct { +} diff --git a/service/oauth2_tf/model.go b/service/oauth2_tf/model.go new file mode 100755 index 0000000000..da3e4bb998 --- /dev/null +++ b/service/oauth2_tf/model.go @@ -0,0 +1,246 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. 
+*/ + +package oauth2_tf + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type CreateCustomAppIntegration struct { + // indicates if an oauth client-secret should be generated + Confidential types.Bool `tfsdk:"confidential"` + // name of the custom oauth app + Name types.String `tfsdk:"name"` + // List of oauth redirect urls + RedirectUrls []types.String `tfsdk:"redirect_urls"` + // OAuth scopes granted to the application. Supported scopes: all-apis, sql, + // offline_access, openid, profile, email. + Scopes []types.String `tfsdk:"scopes"` + // Token access policy + TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy"` +} + +type CreateCustomAppIntegrationOutput struct { + // oauth client-id generated by the Databricks + ClientId types.String `tfsdk:"client_id"` + // oauth client-secret generated by the Databricks if this is a confidential + // oauth app client-secret will be generated. + ClientSecret types.String `tfsdk:"client_secret"` + // unique integration id for the custom oauth app + IntegrationId types.String `tfsdk:"integration_id"` +} + +type CreatePublishedAppIntegration struct { + // app_id of the oauth published app integration. For example power-bi, + // tableau-deskop + AppId types.String `tfsdk:"app_id"` + // Token access policy + TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy"` +} + +type CreatePublishedAppIntegrationOutput struct { + // unique integration id for the published oauth app + IntegrationId types.String `tfsdk:"integration_id"` +} + +// Create service principal secret +type CreateServicePrincipalSecretRequest struct { + // The service principal ID. + ServicePrincipalId types.Int64 `tfsdk:"-" url:"-"` +} + +type CreateServicePrincipalSecretResponse struct { + // UTC time when the secret was created + CreateTime types.String `tfsdk:"create_time"` + // ID of the secret + Id types.String `tfsdk:"id"` + // Secret Value + Secret types.String `tfsdk:"secret"` + // Secret Hash + SecretHash types.String `tfsdk:"secret_hash"` + // Status of the secret + Status types.String `tfsdk:"status"` + // UTC time when the secret was updated + UpdateTime types.String `tfsdk:"update_time"` +} + +type DataPlaneInfo struct { + // Authorization details as a string. + AuthorizationDetails types.String `tfsdk:"authorization_details"` + // The URL of the endpoint for this operation in the dataplane. + EndpointUrl types.String `tfsdk:"endpoint_url"` +} + +type DeleteCustomAppIntegrationOutput struct { +} + +// Delete Custom OAuth App Integration +type DeleteCustomAppIntegrationRequest struct { + // The oauth app integration ID. + IntegrationId types.String `tfsdk:"-" url:"-"` +} + +type DeletePublishedAppIntegrationOutput struct { +} + +// Delete Published OAuth App Integration +type DeletePublishedAppIntegrationRequest struct { + // The oauth app integration ID. + IntegrationId types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete service principal secret +type DeleteServicePrincipalSecretRequest struct { + // The secret ID. + SecretId types.String `tfsdk:"-" url:"-"` + // The service principal ID. 
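To make the header comment of this file concrete: converting one of these structs into its go-sdk twin is mostly a matter of unwrapping the tfsdk scalar types, while lists and maps stay go-native. A hand-written sketch, not generated code, assuming the go-sdk oauth2 package (github.com/databricks/databricks-sdk-go/service/oauth2) exposes the same field names with native Go types, as the header states, and using the plugin-framework ValueString/ValueBool accessors:

// Sketch: mapping the tfsdk struct onto its go-sdk counterpart.
func toGoSDKCreateCustomAppIntegration(in CreateCustomAppIntegration) oauth2.CreateCustomAppIntegration {
	out := oauth2.CreateCustomAppIntegration{
		Name:         in.Name.ValueString(),       // tfsdk types.String -> string
		Confidential: in.Confidential.ValueBool(), // tfsdk types.Bool -> bool
	}
	for _, u := range in.RedirectUrls { // go-native slice, tfsdk element type
		out.RedirectUrls = append(out.RedirectUrls, u.ValueString())
	}
	for _, s := range in.Scopes {
		out.Scopes = append(out.Scopes, s.ValueString())
	}
	return out
}

The remaining request and response structs below unwrap the same way.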
+ ServicePrincipalId types.Int64 `tfsdk:"-" url:"-"` +} + +type GetCustomAppIntegrationOutput struct { + // oauth client id of the custom oauth app + ClientId types.String `tfsdk:"client_id"` + // indicates if an oauth client-secret should be generated + Confidential types.Bool `tfsdk:"confidential"` + // ID of this custom app + IntegrationId types.String `tfsdk:"integration_id"` + // name of the custom oauth app + Name types.String `tfsdk:"name"` + // List of oauth redirect urls + RedirectUrls []types.String `tfsdk:"redirect_urls"` + // Token access policy + TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy"` +} + +// Get OAuth Custom App Integration +type GetCustomAppIntegrationRequest struct { + // The oauth app integration ID. + IntegrationId types.String `tfsdk:"-" url:"-"` +} + +type GetCustomAppIntegrationsOutput struct { + // Array of Custom OAuth App Integrations defined for the account. + Apps []GetCustomAppIntegrationOutput `tfsdk:"apps"` +} + +type GetPublishedAppIntegrationOutput struct { + // app-id of the published app integration + AppId types.String `tfsdk:"app_id"` + // unique integration id for the published oauth app + IntegrationId types.String `tfsdk:"integration_id"` + // name of the published oauth app + Name types.String `tfsdk:"name"` + // Token access policy + TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy"` +} + +// Get OAuth Published App Integration +type GetPublishedAppIntegrationRequest struct { + // The oauth app integration ID. + IntegrationId types.String `tfsdk:"-" url:"-"` +} + +type GetPublishedAppIntegrationsOutput struct { + // Array of Published OAuth App Integrations defined for the account. + Apps []GetPublishedAppIntegrationOutput `tfsdk:"apps"` +} + +type GetPublishedAppsOutput struct { + // Array of Published OAuth Apps. + Apps []PublishedAppOutput `tfsdk:"apps"` + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// Get all the published OAuth apps +type ListOAuthPublishedAppsRequest struct { + // The max number of OAuth published apps to return. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // A token that can be used to get the next page of results. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +// List service principal secrets +type ListServicePrincipalSecretsRequest struct { + // The service principal ID. + ServicePrincipalId types.Int64 `tfsdk:"-" url:"-"` +} + +type ListServicePrincipalSecretsResponse struct { + // List of the secrets + Secrets []SecretInfo `tfsdk:"secrets"` +} + +type PublishedAppOutput struct { + // Unique ID of the published OAuth app. + AppId types.String `tfsdk:"app_id"` + // Client ID of the published OAuth app. It is the client_id in the OAuth + // flow + ClientId types.String `tfsdk:"client_id"` + // Description of the published OAuth app. + Description types.String `tfsdk:"description"` + // Whether the published OAuth app is a confidential client. It is always + // false for published OAuth apps. + IsConfidentialClient types.Bool `tfsdk:"is_confidential_client"` + // Name of the published OAuth app. + Name types.String `tfsdk:"name"` + // Redirect URLs of the published OAuth app. + RedirectUrls []types.String `tfsdk:"redirect_urls"` + // Required scopes for the published OAuth app. 
+ Scopes []types.String `tfsdk:"scopes"` +} + +type SecretInfo struct { + // UTC time when the secret was created + CreateTime types.String `tfsdk:"create_time"` + // ID of the secret + Id types.String `tfsdk:"id"` + // Secret Hash + SecretHash types.String `tfsdk:"secret_hash"` + // Status of the secret + Status types.String `tfsdk:"status"` + // UTC time when the secret was updated + UpdateTime types.String `tfsdk:"update_time"` +} + +type TokenAccessPolicy struct { + // access token time to live in minutes + AccessTokenTtlInMinutes types.Int64 `tfsdk:"access_token_ttl_in_minutes"` + // refresh token time to live in minutes + RefreshTokenTtlInMinutes types.Int64 `tfsdk:"refresh_token_ttl_in_minutes"` +} + +type UpdateCustomAppIntegration struct { + // The oauth app integration ID. + IntegrationId types.String `tfsdk:"-" url:"-"` + // List of oauth redirect urls to be updated in the custom oauth app + // integration + RedirectUrls []types.String `tfsdk:"redirect_urls"` + // Token access policy to be updated in the custom oauth app integration + TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy"` +} + +type UpdateCustomAppIntegrationOutput struct { +} + +type UpdatePublishedAppIntegration struct { + // The oauth app integration ID. + IntegrationId types.String `tfsdk:"-" url:"-"` + // Token access policy to be updated in the published oauth app integration + TokenAccessPolicy *TokenAccessPolicy `tfsdk:"token_access_policy"` +} + +type UpdatePublishedAppIntegrationOutput struct { +} diff --git a/service/pipelines_tf/model.go b/service/pipelines_tf/model.go new file mode 100755 index 0000000000..7628d55f4a --- /dev/null +++ b/service/pipelines_tf/model.go @@ -0,0 +1,1279 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package pipelines_tf + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type CreatePipeline struct { + // If false, deployment will fail if name conflicts with that of another + // pipeline. + AllowDuplicateNames types.Bool `tfsdk:"allow_duplicate_names"` + // A catalog in Unity Catalog to publish data from this pipeline to. If + // `target` is specified, tables in this pipeline are published to a + // `target` schema inside `catalog` (for example, + // `catalog`.`target`.`table`). If `target` is not specified, no data is + // published to Unity Catalog. + Catalog types.String `tfsdk:"catalog"` + // DLT Release Channel that specifies which version to use. + Channel types.String `tfsdk:"channel"` + // Cluster settings for this pipeline deployment. + Clusters []PipelineCluster `tfsdk:"clusters"` + // String-String configuration for this pipeline execution. + Configuration map[string]types.String `tfsdk:"configuration"` + // Whether the pipeline is continuous or triggered. This replaces `trigger`. + Continuous types.Bool `tfsdk:"continuous"` + // Deployment type of this pipeline. 
+ Deployment *PipelineDeployment `tfsdk:"deployment"` + // Whether the pipeline is in Development mode. Defaults to false. + Development types.Bool `tfsdk:"development"` + + DryRun types.Bool `tfsdk:"dry_run"` + // Pipeline product edition. + Edition types.String `tfsdk:"edition"` + // Filters on which Pipeline packages to include in the deployed graph. + Filters *Filters `tfsdk:"filters"` + // The definition of a gateway pipeline to support CDC. + GatewayDefinition *IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition"` + // Unique identifier for this pipeline. + Id types.String `tfsdk:"id"` + // The configuration for a managed ingestion pipeline. These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. + IngestionDefinition *ManagedIngestionPipelineDefinition `tfsdk:"ingestion_definition"` + // Libraries or code needed by this deployment. + Libraries []PipelineLibrary `tfsdk:"libraries"` + // Friendly identifier for this pipeline. + Name types.String `tfsdk:"name"` + // List of notification settings for this pipeline. + Notifications []Notifications `tfsdk:"notifications"` + // Whether Photon is enabled for this pipeline. + Photon types.Bool `tfsdk:"photon"` + // Whether serverless compute is enabled for this pipeline. + Serverless types.Bool `tfsdk:"serverless"` + // DBFS root directory for storing checkpoints and tables. + Storage types.String `tfsdk:"storage"` + // Target schema (database) to add tables in this pipeline to. If not + // specified, no data is published to the Hive metastore or Unity Catalog. + // To publish to Unity Catalog, also specify `catalog`. + Target types.String `tfsdk:"target"` + // Which pipeline trigger to use. Deprecated: Use `continuous` instead. + Trigger *PipelineTrigger `tfsdk:"trigger"` +} + +type CreatePipelineResponse struct { + // Only returned when dry_run is true. + EffectiveSettings *PipelineSpec `tfsdk:"effective_settings"` + // The unique identifier for the newly created pipeline. Only returned when + // dry_run is false. + PipelineId types.String `tfsdk:"pipeline_id"` +} + +type CronTrigger struct { + QuartzCronSchedule types.String `tfsdk:"quartz_cron_schedule"` + + TimezoneId types.String `tfsdk:"timezone_id"` +} + +type DataPlaneId struct { + // The instance name of the data plane emitting an event. + Instance types.String `tfsdk:"instance"` + // A sequence number, unique and increasing within the data plane instance. + SeqNo types.Int64 `tfsdk:"seq_no"` +} + +// Delete a pipeline +type DeletePipelineRequest struct { + PipelineId types.String `tfsdk:"-" url:"-"` +} + +type DeletePipelineResponse struct { +} + +// The deployment method that manages the pipeline: - BUNDLE: The pipeline is +// managed by a Databricks Asset Bundle. +type DeploymentKind string + +const DeploymentKindBundle DeploymentKind = `BUNDLE` + +// String representation for [fmt.Print] +func (f *DeploymentKind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeploymentKind) Set(v string) error { + switch v { + case `BUNDLE`: + *f = DeploymentKind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BUNDLE"`, v) + } +} + +// Type always returns DeploymentKind to satisfy [pflag.Value] interface +func (f *DeploymentKind) Type() string { + return "DeploymentKind" +} + +type EditPipeline struct { + // If false, deployment will fail if name has changed and conflicts the name + // of another pipeline. 
+ AllowDuplicateNames types.Bool `tfsdk:"allow_duplicate_names"` + // A catalog in Unity Catalog to publish data from this pipeline to. If + // `target` is specified, tables in this pipeline are published to a + // `target` schema inside `catalog` (for example, + // `catalog`.`target`.`table`). If `target` is not specified, no data is + // published to Unity Catalog. + Catalog types.String `tfsdk:"catalog"` + // DLT Release Channel that specifies which version to use. + Channel types.String `tfsdk:"channel"` + // Cluster settings for this pipeline deployment. + Clusters []PipelineCluster `tfsdk:"clusters"` + // String-String configuration for this pipeline execution. + Configuration map[string]types.String `tfsdk:"configuration"` + // Whether the pipeline is continuous or triggered. This replaces `trigger`. + Continuous types.Bool `tfsdk:"continuous"` + // Deployment type of this pipeline. + Deployment *PipelineDeployment `tfsdk:"deployment"` + // Whether the pipeline is in Development mode. Defaults to false. + Development types.Bool `tfsdk:"development"` + // Pipeline product edition. + Edition types.String `tfsdk:"edition"` + // If present, the last-modified time of the pipeline settings before the + // edit. If the settings were modified after that time, then the request + // will fail with a conflict. + ExpectedLastModified types.Int64 `tfsdk:"expected_last_modified"` + // Filters on which Pipeline packages to include in the deployed graph. + Filters *Filters `tfsdk:"filters"` + // The definition of a gateway pipeline to support CDC. + GatewayDefinition *IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition"` + // Unique identifier for this pipeline. + Id types.String `tfsdk:"id"` + // The configuration for a managed ingestion pipeline. These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. + IngestionDefinition *ManagedIngestionPipelineDefinition `tfsdk:"ingestion_definition"` + // Libraries or code needed by this deployment. + Libraries []PipelineLibrary `tfsdk:"libraries"` + // Friendly identifier for this pipeline. + Name types.String `tfsdk:"name"` + // List of notification settings for this pipeline. + Notifications []Notifications `tfsdk:"notifications"` + // Whether Photon is enabled for this pipeline. + Photon types.Bool `tfsdk:"photon"` + // Unique identifier for this pipeline. + PipelineId types.String `tfsdk:"pipeline_id" url:"-"` + // Whether serverless compute is enabled for this pipeline. + Serverless types.Bool `tfsdk:"serverless"` + // DBFS root directory for storing checkpoints and tables. + Storage types.String `tfsdk:"storage"` + // Target schema (database) to add tables in this pipeline to. If not + // specified, no data is published to the Hive metastore or Unity Catalog. + // To publish to Unity Catalog, also specify `catalog`. + Target types.String `tfsdk:"target"` + // Which pipeline trigger to use. Deprecated: Use `continuous` instead. + Trigger *PipelineTrigger `tfsdk:"trigger"` +} + +type EditPipelineResponse struct { +} + +type ErrorDetail struct { + // The exception thrown for this error, with its chain of cause. + Exceptions []SerializedException `tfsdk:"exceptions"` + // Whether this error is considered fatal, that is, unrecoverable. + Fatal types.Bool `tfsdk:"fatal"` +} + +// The severity level of the event. 
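The CreatePipeline and EditPipeline structs above show the collection convention from this file's header: slices and maps stay go-native while their element values use tfsdk types. A small sketch of filling one in (illustrative only; every literal value here is made up):

// Sketch: scalars become tfsdk types, Configuration stays a go-native map
// with tfsdk-typed values.
func exampleCreatePipeline() CreatePipeline {
	return CreatePipeline{
		Name:        types.StringValue("example-pipeline"), // hypothetical pipeline name
		Catalog:     types.StringValue("main"),             // hypothetical Unity Catalog name
		Target:      types.StringValue("analytics"),        // hypothetical target schema
		Continuous:  types.BoolValue(false),
		Development: types.BoolValue(true),
		Serverless:  types.BoolValue(true),
		Configuration: map[string]types.String{
			"my.custom.key": types.StringValue("my-value"), // hypothetical configuration entry
		},
	}
}

The EventLevel enum that follows is another instance of the generated pflag.Value pattern shown earlier.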
+type EventLevel string + +const EventLevelError EventLevel = `ERROR` + +const EventLevelInfo EventLevel = `INFO` + +const EventLevelMetrics EventLevel = `METRICS` + +const EventLevelWarn EventLevel = `WARN` + +// String representation for [fmt.Print] +func (f *EventLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EventLevel) Set(v string) error { + switch v { + case `ERROR`, `INFO`, `METRICS`, `WARN`: + *f = EventLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ERROR", "INFO", "METRICS", "WARN"`, v) + } +} + +// Type always returns EventLevel to satisfy [pflag.Value] interface +func (f *EventLevel) Type() string { + return "EventLevel" +} + +type FileLibrary struct { + // The absolute path of the file. + Path types.String `tfsdk:"path"` +} + +type Filters struct { + // Paths to exclude. + Exclude []types.String `tfsdk:"exclude"` + // Paths to include. + Include []types.String `tfsdk:"include"` +} + +// Get pipeline permission levels +type GetPipelinePermissionLevelsRequest struct { + // The pipeline for which to get or manage permissions. + PipelineId types.String `tfsdk:"-" url:"-"` +} + +type GetPipelinePermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []PipelinePermissionsDescription `tfsdk:"permission_levels"` +} + +// Get pipeline permissions +type GetPipelinePermissionsRequest struct { + // The pipeline for which to get or manage permissions. + PipelineId types.String `tfsdk:"-" url:"-"` +} + +// Get a pipeline +type GetPipelineRequest struct { + PipelineId types.String `tfsdk:"-" url:"-"` +} + +type GetPipelineResponse struct { + // An optional message detailing the cause of the pipeline state. + Cause types.String `tfsdk:"cause"` + // The ID of the cluster that the pipeline is running on. + ClusterId types.String `tfsdk:"cluster_id"` + // The username of the pipeline creator. + CreatorUserName types.String `tfsdk:"creator_user_name"` + // The health of a pipeline. + Health GetPipelineResponseHealth `tfsdk:"health"` + // The last time the pipeline settings were modified or created. + LastModified types.Int64 `tfsdk:"last_modified"` + // Status of the latest updates for the pipeline. Ordered with the newest + // update first. + LatestUpdates []UpdateStateInfo `tfsdk:"latest_updates"` + // A human friendly identifier for the pipeline, taken from the `spec`. + Name types.String `tfsdk:"name"` + // The ID of the pipeline. + PipelineId types.String `tfsdk:"pipeline_id"` + // Username of the user that the pipeline will run on behalf of. + RunAsUserName types.String `tfsdk:"run_as_user_name"` + // The pipeline specification. This field is not returned when called by + // `ListPipelines`. + Spec *PipelineSpec `tfsdk:"spec"` + // The pipeline state. + State PipelineState `tfsdk:"state"` +} + +// The health of a pipeline. 
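A usage note on GetPipelineResponse above: enum-valued fields such as Health and State are plain string-backed Go types, so they compare directly against the generated constants, whereas the tfsdk scalars are unwrapped with their accessors. A sketch (illustrative only; assumes fmt is imported, as it already is in this file):

// Sketch: enum fields compare against constants, tfsdk fields use accessors.
func describePipeline(resp GetPipelineResponse) string {
	if resp.Health == GetPipelineResponseHealthUnhealthy { // constant declared just below
		return fmt.Sprintf("pipeline %s is unhealthy: %s", resp.Name.ValueString(), resp.Cause.ValueString())
	}
	return fmt.Sprintf("pipeline %s is healthy", resp.Name.ValueString())
}

The GetPipelineResponseHealth enum with those constants follows.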
+type GetPipelineResponseHealth string + +const GetPipelineResponseHealthHealthy GetPipelineResponseHealth = `HEALTHY` + +const GetPipelineResponseHealthUnhealthy GetPipelineResponseHealth = `UNHEALTHY` + +// String representation for [fmt.Print] +func (f *GetPipelineResponseHealth) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetPipelineResponseHealth) Set(v string) error { + switch v { + case `HEALTHY`, `UNHEALTHY`: + *f = GetPipelineResponseHealth(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "HEALTHY", "UNHEALTHY"`, v) + } +} + +// Type always returns GetPipelineResponseHealth to satisfy [pflag.Value] interface +func (f *GetPipelineResponseHealth) Type() string { + return "GetPipelineResponseHealth" +} + +// Get a pipeline update +type GetUpdateRequest struct { + // The ID of the pipeline. + PipelineId types.String `tfsdk:"-" url:"-"` + // The ID of the update. + UpdateId types.String `tfsdk:"-" url:"-"` +} + +type GetUpdateResponse struct { + // The current update info. + Update *UpdateInfo `tfsdk:"update"` +} + +type IngestionConfig struct { + // Select tables from a specific source schema. + Schema *SchemaSpec `tfsdk:"schema"` + // Select tables from a specific source table. + Table *TableSpec `tfsdk:"table"` +} + +type IngestionGatewayPipelineDefinition struct { + // Immutable. The Unity Catalog connection this gateway pipeline uses to + // communicate with the source. + ConnectionId types.String `tfsdk:"connection_id"` + // Required, Immutable. The name of the catalog for the gateway pipeline's + // storage location. + GatewayStorageCatalog types.String `tfsdk:"gateway_storage_catalog"` + // Required. The Unity Catalog-compatible naming for the gateway storage + // location. This is the destination to use for the data that is extracted + // by the gateway. Delta Live Tables system will automatically create the + // storage location under the catalog and schema. + GatewayStorageName types.String `tfsdk:"gateway_storage_name"` + // Required, Immutable. The name of the schema for the gateway pipelines's + // storage location. + GatewayStorageSchema types.String `tfsdk:"gateway_storage_schema"` +} + +// List pipeline events +type ListPipelineEventsRequest struct { + // Criteria to select a subset of results, expressed using a SQL-like + // syntax. The supported filters are: 1. level='INFO' (or WARN or ERROR) 2. + // level in ('INFO', 'WARN') 3. id='[event-id]' 4. timestamp > 'TIMESTAMP' + // (or >=,<,<=,=) + // + // Composite expressions are supported, for example: level in ('ERROR', + // 'WARN') AND timestamp> '2021-07-22T06:37:33.083Z' + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // Max number of entries to return in a single page. The system may return + // fewer than max_results events in a response, even if there are more + // events available. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // A string indicating a sort order by timestamp for the results, for + // example, ["timestamp asc"]. The sort order can be ascending or + // descending. By default, events are returned in descending order by + // timestamp. + OrderBy []types.String `tfsdk:"-" url:"order_by,omitempty"` + // Page token returned by previous call. This field is mutually exclusive + // with all fields in this request except max_results. An error is returned + // if any fields other than max_results are set when this field is set. 
+ PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + + PipelineId types.String `tfsdk:"-" url:"-"` +} + +type ListPipelineEventsResponse struct { + // The list of events matching the request criteria. + Events []PipelineEvent `tfsdk:"events"` + // If present, a token to fetch the next page of events. + NextPageToken types.String `tfsdk:"next_page_token"` + // If present, a token to fetch the previous page of events. + PrevPageToken types.String `tfsdk:"prev_page_token"` +} + +// List pipelines +type ListPipelinesRequest struct { + // Select a subset of results based on the specified criteria. The supported + // filters are: + // + // * `notebook=''` to select pipelines that reference the provided + // notebook path. * `name LIKE '[pattern]'` to select pipelines with a name + // that matches pattern. Wildcards are supported, for example: `name LIKE + // '%shopping%'` + // + // Composite filters are not supported. This field is optional. + Filter types.String `tfsdk:"-" url:"filter,omitempty"` + // The maximum number of entries to return in a single page. The system may + // return fewer than max_results events in a response, even if there are + // more events available. This field is optional. The default value is 25. + // The maximum value is 100. An error is returned if the value of + // max_results is greater than 100. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // A list of strings specifying the order of results. Supported order_by + // fields are id and name. The default is id asc. This field is optional. + OrderBy []types.String `tfsdk:"-" url:"order_by,omitempty"` + // Page token returned by previous call + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListPipelinesResponse struct { + // If present, a token to fetch the next page of events. + NextPageToken types.String `tfsdk:"next_page_token"` + // The list of events matching the request criteria. + Statuses []PipelineStateInfo `tfsdk:"statuses"` +} + +// List pipeline updates +type ListUpdatesRequest struct { + // Max number of entries to return in a single page. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Page token returned by previous call + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` + // The pipeline to return updates for. + PipelineId types.String `tfsdk:"-" url:"-"` + // If present, returns updates until and including this update_id. + UntilUpdateId types.String `tfsdk:"-" url:"until_update_id,omitempty"` +} + +type ListUpdatesResponse struct { + // If present, then there are more results, and this a token to be used in a + // subsequent request to fetch the next page. + NextPageToken types.String `tfsdk:"next_page_token"` + // If present, then this token can be used in a subsequent request to fetch + // the previous page. + PrevPageToken types.String `tfsdk:"prev_page_token"` + + Updates []UpdateInfo `tfsdk:"updates"` +} + +type ManagedIngestionPipelineDefinition struct { + // Immutable. The Unity Catalog connection this ingestion pipeline uses to + // communicate with the source. Specify either ingestion_gateway_id or + // connection_name. + ConnectionName types.String `tfsdk:"connection_name"` + // Immutable. Identifier for the ingestion gateway used by this ingestion + // pipeline to communicate with the source. Specify either + // ingestion_gateway_id or connection_name. + IngestionGatewayId types.String `tfsdk:"ingestion_gateway_id"` + // Required. 
Settings specifying tables to replicate and the destination for + // the replicated tables. + Objects []IngestionConfig `tfsdk:"objects"` + // Configuration settings to control the ingestion of tables. These settings + // are applied to all tables in the pipeline. + TableConfiguration *TableSpecificConfig `tfsdk:"table_configuration"` +} + +type ManualTrigger struct { +} + +// Maturity level for EventDetails. +type MaturityLevel string + +const MaturityLevelDeprecated MaturityLevel = `DEPRECATED` + +const MaturityLevelEvolving MaturityLevel = `EVOLVING` + +const MaturityLevelStable MaturityLevel = `STABLE` + +// String representation for [fmt.Print] +func (f *MaturityLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *MaturityLevel) Set(v string) error { + switch v { + case `DEPRECATED`, `EVOLVING`, `STABLE`: + *f = MaturityLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEPRECATED", "EVOLVING", "STABLE"`, v) + } +} + +// Type always returns MaturityLevel to satisfy [pflag.Value] interface +func (f *MaturityLevel) Type() string { + return "MaturityLevel" +} + +type NotebookLibrary struct { + // The absolute path of the notebook. + Path types.String `tfsdk:"path"` +} + +type Notifications struct { + // A list of alerts that trigger the sending of notifications to the + // configured destinations. The supported alerts are: + // + // * `on-update-success`: A pipeline update completes successfully. * + // `on-update-failure`: Each time a pipeline update fails. * + // `on-update-fatal-failure`: A pipeline update fails with a non-retryable + // (fatal) error. * `on-flow-failure`: A single data flow fails. + Alerts []types.String `tfsdk:"alerts"` + // A list of email addresses notified when a configured alert is triggered. + EmailRecipients []types.String `tfsdk:"email_recipients"` +} + +type Origin struct { + // The id of a batch. Unique within a flow. + BatchId types.Int64 `tfsdk:"batch_id"` + // The cloud provider, e.g., AWS or Azure. + Cloud types.String `tfsdk:"cloud"` + // The id of the cluster where an execution happens. Unique within a region. + ClusterId types.String `tfsdk:"cluster_id"` + // The name of a dataset. Unique within a pipeline. + DatasetName types.String `tfsdk:"dataset_name"` + // The id of the flow. Globally unique. Incremental queries will generally + // reuse the same id while complete queries will have a new id per update. + FlowId types.String `tfsdk:"flow_id"` + // The name of the flow. Not unique. + FlowName types.String `tfsdk:"flow_name"` + // The optional host name where the event was triggered + Host types.String `tfsdk:"host"` + // The id of a maintenance run. Globally unique. + MaintenanceId types.String `tfsdk:"maintenance_id"` + // Materialization name. + MaterializationName types.String `tfsdk:"materialization_name"` + // The org id of the user. Unique within a cloud. + OrgId types.Int64 `tfsdk:"org_id"` + // The id of the pipeline. Globally unique. + PipelineId types.String `tfsdk:"pipeline_id"` + // The name of the pipeline. Not unique. + PipelineName types.String `tfsdk:"pipeline_name"` + // The cloud region. + Region types.String `tfsdk:"region"` + // The id of the request that caused an update. + RequestId types.String `tfsdk:"request_id"` + // The id of a (delta) table. Globally unique. + TableId types.String `tfsdk:"table_id"` + // The Unity Catalog id of the MV or ST being updated. 
+ UcResourceId types.String `tfsdk:"uc_resource_id"` + // The id of an execution. Globally unique. + UpdateId types.String `tfsdk:"update_id"` +} + +type PipelineAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel PipelinePermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type PipelineAccessControlResponse struct { + // All permissions. + AllPermissions []PipelinePermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type PipelineCluster struct { + // Note: This field won't be persisted. Only API users will check this + // field. + ApplyPolicyDefaultValues types.Bool `tfsdk:"apply_policy_default_values"` + // Parameters needed in order to automatically scale clusters up and down + // based on load. Note: autoscaling works best with DB runtime versions 3.0 + // or later. + Autoscale *PipelineClusterAutoscale `tfsdk:"autoscale"` + // Attributes related to clusters running on Amazon Web Services. If not + // specified at cluster creation, a set of default values will be used. + AwsAttributes *compute.AwsAttributes `tfsdk:"aws_attributes"` + // Attributes related to clusters running on Microsoft Azure. If not + // specified at cluster creation, a set of default values will be used. + AzureAttributes *compute.AzureAttributes `tfsdk:"azure_attributes"` + // The configuration for delivering spark logs to a long-term storage + // destination. Only dbfs destinations are supported. Only one destination + // can be specified for one cluster. If the conf is given, the logs will be + // delivered to the destination every `5 mins`. The destination of driver + // logs is `$destination/$clusterId/driver`, while the destination of + // executor logs is `$destination/$clusterId/executor`. + ClusterLogConf *compute.ClusterLogConf `tfsdk:"cluster_log_conf"` + // Additional tags for cluster resources. Databricks will tag all cluster + // resources (e.g., AWS instances and EBS volumes) with these tags in + // addition to `default_tags`. Notes: + // + // - Currently, Databricks allows at most 45 custom tags + // + // - Clusters can only reuse cloud resources if the resources' tags are a + // subset of the cluster tags + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // The optional ID of the instance pool for the driver of the cluster + // belongs. The pool cluster uses the instance pool with id + // (instance_pool_id) if the driver pool is not assigned. + DriverInstancePoolId types.String `tfsdk:"driver_instance_pool_id"` + // The node type of the Spark driver. Note that this field is optional; if + // unset, the driver node type will be set as the same value as + // `node_type_id` defined above. + DriverNodeTypeId types.String `tfsdk:"driver_node_type_id"` + // Attributes related to clusters running on Google Cloud Platform. If not + // specified at cluster creation, a set of default values will be used. + GcpAttributes *compute.GcpAttributes `tfsdk:"gcp_attributes"` + // The configuration for storing init scripts. 
Any number of destinations + // can be specified. The scripts are executed sequentially in the order + // provided. If `cluster_log_conf` is specified, init script logs are sent + // to `//init_scripts`. + InitScripts []compute.InitScriptInfo `tfsdk:"init_scripts"` + // The optional ID of the instance pool to which the cluster belongs. + InstancePoolId types.String `tfsdk:"instance_pool_id"` + // A label for the cluster specification, either `default` to configure the + // default cluster, or `maintenance` to configure the maintenance cluster. + // This field is optional. The default value is `default`. + Label types.String `tfsdk:"label"` + // This field encodes, through a single value, the resources available to + // each of the Spark nodes in this cluster. For example, the Spark nodes can + // be provisioned and optimized for memory or compute intensive workloads. A + // list of available node types can be retrieved by using the + // :method:clusters/listNodeTypes API call. + NodeTypeId types.String `tfsdk:"node_type_id"` + // Number of worker nodes that this cluster should have. A cluster has one + // Spark Driver and `num_workers` Executors for a total of `num_workers` + 1 + // Spark nodes. + // + // Note: When reading the properties of a cluster, this field reflects the + // desired number of workers rather than the actual current number of + // workers. For instance, if a cluster is resized from 5 to 10 workers, this + // field will immediately be updated to reflect the target size of 10 + // workers, whereas the workers listed in `spark_info` will gradually + // increase from 5 to 10 as the new nodes are provisioned. + NumWorkers types.Int64 `tfsdk:"num_workers"` + // The ID of the cluster policy used to create the cluster if applicable. + PolicyId types.String `tfsdk:"policy_id"` + // An object containing a set of optional, user-specified Spark + // configuration key-value pairs. See :method:clusters/create for more + // details. + SparkConf map[string]types.String `tfsdk:"spark_conf"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs. Please note that key-value pair of the form + // (X,Y) will be exported as is (i.e., `export X='Y'`) while launching the + // driver and workers. + // + // In order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we + // recommend appending them to `$SPARK_DAEMON_JAVA_OPTS` as shown in the + // example below. This ensures that all default databricks managed + // environmental variables are included as well. + // + // Example Spark environment variables: `{"SPARK_WORKER_MEMORY": "28000m", + // "SPARK_LOCAL_DIRS": "/local_disk0"}` or `{"SPARK_DAEMON_JAVA_OPTS": + // "$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true"}` + SparkEnvVars map[string]types.String `tfsdk:"spark_env_vars"` + // SSH public key contents that will be added to each Spark node in this + // cluster. The corresponding private keys can be used to login with the + // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. + SshPublicKeys []types.String `tfsdk:"ssh_public_keys"` +} + +type PipelineClusterAutoscale struct { + // The maximum number of workers to which the cluster can scale up when + // overloaded. `max_workers` must be strictly greater than `min_workers`. + MaxWorkers types.Int64 `tfsdk:"max_workers"` + // The minimum number of workers the cluster can scale down to when + // underutilized. It is also the initial number of workers the cluster will + // have after creation. 
+ MinWorkers types.Int64 `tfsdk:"min_workers"` + // Databricks Enhanced Autoscaling optimizes cluster utilization by + // automatically allocating cluster resources based on workload volume, with + // minimal impact to the data processing latency of your pipelines. Enhanced + // Autoscaling is available for `updates` clusters only. The legacy + // autoscaling feature is used for `maintenance` clusters. + Mode PipelineClusterAutoscaleMode `tfsdk:"mode"` +} + +// Databricks Enhanced Autoscaling optimizes cluster utilization by +// automatically allocating cluster resources based on workload volume, with +// minimal impact to the data processing latency of your pipelines. Enhanced +// Autoscaling is available for `updates` clusters only. The legacy autoscaling +// feature is used for `maintenance` clusters. +type PipelineClusterAutoscaleMode string + +const PipelineClusterAutoscaleModeEnhanced PipelineClusterAutoscaleMode = `ENHANCED` + +const PipelineClusterAutoscaleModeLegacy PipelineClusterAutoscaleMode = `LEGACY` + +// String representation for [fmt.Print] +func (f *PipelineClusterAutoscaleMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelineClusterAutoscaleMode) Set(v string) error { + switch v { + case `ENHANCED`, `LEGACY`: + *f = PipelineClusterAutoscaleMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ENHANCED", "LEGACY"`, v) + } +} + +// Type always returns PipelineClusterAutoscaleMode to satisfy [pflag.Value] interface +func (f *PipelineClusterAutoscaleMode) Type() string { + return "PipelineClusterAutoscaleMode" +} + +type PipelineDeployment struct { + // The deployment method that manages the pipeline. + Kind DeploymentKind `tfsdk:"kind"` + // The path to the file containing metadata about the deployment. + MetadataFilePath types.String `tfsdk:"metadata_file_path"` +} + +type PipelineEvent struct { + // Information about an error captured by the event. + Error *ErrorDetail `tfsdk:"error"` + // The event type. Should always correspond to the details + EventType types.String `tfsdk:"event_type"` + // A time-based, globally unique id. + Id types.String `tfsdk:"id"` + // The severity level of the event. + Level EventLevel `tfsdk:"level"` + // Maturity level for event_type. + MaturityLevel MaturityLevel `tfsdk:"maturity_level"` + // The display message associated with the event. + Message types.String `tfsdk:"message"` + // Describes where the event originates from. + Origin *Origin `tfsdk:"origin"` + // A sequencing object to identify and order events. + Sequence *Sequencing `tfsdk:"sequence"` + // The time of the event. + Timestamp types.String `tfsdk:"timestamp"` +} + +type PipelineLibrary struct { + // The path to a file that defines a pipeline and is stored in the + // Databricks Repos. + File *FileLibrary `tfsdk:"file"` + // URI of the jar to be installed. Currently only DBFS is supported. + Jar types.String `tfsdk:"jar"` + // Specification of a maven library to be installed. + Maven *compute.MavenLibrary `tfsdk:"maven"` + // The path to a notebook that defines a pipeline and is stored in the + // Databricks workspace. 
+ Notebook *NotebookLibrary `tfsdk:"notebook"` +} + +type PipelinePermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel PipelinePermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type PipelinePermissionLevel string + +const PipelinePermissionLevelCanManage PipelinePermissionLevel = `CAN_MANAGE` + +const PipelinePermissionLevelCanRun PipelinePermissionLevel = `CAN_RUN` + +const PipelinePermissionLevelCanView PipelinePermissionLevel = `CAN_VIEW` + +const PipelinePermissionLevelIsOwner PipelinePermissionLevel = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *PipelinePermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelinePermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_RUN`, `CAN_VIEW`, `IS_OWNER`: + *f = PipelinePermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_RUN", "CAN_VIEW", "IS_OWNER"`, v) + } +} + +// Type always returns PipelinePermissionLevel to satisfy [pflag.Value] interface +func (f *PipelinePermissionLevel) Type() string { + return "PipelinePermissionLevel" +} + +type PipelinePermissions struct { + AccessControlList []PipelineAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type PipelinePermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel PipelinePermissionLevel `tfsdk:"permission_level"` +} + +type PipelinePermissionsRequest struct { + AccessControlList []PipelineAccessControlRequest `tfsdk:"access_control_list"` + // The pipeline for which to get or manage permissions. + PipelineId types.String `tfsdk:"-" url:"-"` +} + +type PipelineSpec struct { + // A catalog in Unity Catalog to publish data from this pipeline to. If + // `target` is specified, tables in this pipeline are published to a + // `target` schema inside `catalog` (for example, + // `catalog`.`target`.`table`). If `target` is not specified, no data is + // published to Unity Catalog. + Catalog types.String `tfsdk:"catalog"` + // DLT Release Channel that specifies which version to use. + Channel types.String `tfsdk:"channel"` + // Cluster settings for this pipeline deployment. + Clusters []PipelineCluster `tfsdk:"clusters"` + // String-String configuration for this pipeline execution. + Configuration map[string]types.String `tfsdk:"configuration"` + // Whether the pipeline is continuous or triggered. This replaces `trigger`. + Continuous types.Bool `tfsdk:"continuous"` + // Deployment type of this pipeline. + Deployment *PipelineDeployment `tfsdk:"deployment"` + // Whether the pipeline is in Development mode. Defaults to false. + Development types.Bool `tfsdk:"development"` + // Pipeline product edition. + Edition types.String `tfsdk:"edition"` + // Filters on which Pipeline packages to include in the deployed graph. + Filters *Filters `tfsdk:"filters"` + // The definition of a gateway pipeline to support CDC. + GatewayDefinition *IngestionGatewayPipelineDefinition `tfsdk:"gateway_definition"` + // Unique identifier for this pipeline. + Id types.String `tfsdk:"id"` + // The configuration for a managed ingestion pipeline. These settings cannot + // be used with the 'libraries', 'target' or 'catalog' settings. 
+ IngestionDefinition *ManagedIngestionPipelineDefinition `tfsdk:"ingestion_definition"` + // Libraries or code needed by this deployment. + Libraries []PipelineLibrary `tfsdk:"libraries"` + // Friendly identifier for this pipeline. + Name types.String `tfsdk:"name"` + // List of notification settings for this pipeline. + Notifications []Notifications `tfsdk:"notifications"` + // Whether Photon is enabled for this pipeline. + Photon types.Bool `tfsdk:"photon"` + // Whether serverless compute is enabled for this pipeline. + Serverless types.Bool `tfsdk:"serverless"` + // DBFS root directory for storing checkpoints and tables. + Storage types.String `tfsdk:"storage"` + // Target schema (database) to add tables in this pipeline to. If not + // specified, no data is published to the Hive metastore or Unity Catalog. + // To publish to Unity Catalog, also specify `catalog`. + Target types.String `tfsdk:"target"` + // Which pipeline trigger to use. Deprecated: Use `continuous` instead. + Trigger *PipelineTrigger `tfsdk:"trigger"` +} + +// The pipeline state. +type PipelineState string + +const PipelineStateDeleted PipelineState = `DELETED` + +const PipelineStateDeploying PipelineState = `DEPLOYING` + +const PipelineStateFailed PipelineState = `FAILED` + +const PipelineStateIdle PipelineState = `IDLE` + +const PipelineStateRecovering PipelineState = `RECOVERING` + +const PipelineStateResetting PipelineState = `RESETTING` + +const PipelineStateRunning PipelineState = `RUNNING` + +const PipelineStateStarting PipelineState = `STARTING` + +const PipelineStateStopping PipelineState = `STOPPING` + +// String representation for [fmt.Print] +func (f *PipelineState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelineState) Set(v string) error { + switch v { + case `DELETED`, `DEPLOYING`, `FAILED`, `IDLE`, `RECOVERING`, `RESETTING`, `RUNNING`, `STARTING`, `STOPPING`: + *f = PipelineState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETED", "DEPLOYING", "FAILED", "IDLE", "RECOVERING", "RESETTING", "RUNNING", "STARTING", "STOPPING"`, v) + } +} + +// Type always returns PipelineState to satisfy [pflag.Value] interface +func (f *PipelineState) Type() string { + return "PipelineState" +} + +type PipelineStateInfo struct { + // The unique identifier of the cluster running the pipeline. + ClusterId types.String `tfsdk:"cluster_id"` + // The username of the pipeline creator. + CreatorUserName types.String `tfsdk:"creator_user_name"` + // Status of the latest updates for the pipeline. Ordered with the newest + // update first. + LatestUpdates []UpdateStateInfo `tfsdk:"latest_updates"` + // The user-friendly name of the pipeline. + Name types.String `tfsdk:"name"` + // The unique identifier of the pipeline. + PipelineId types.String `tfsdk:"pipeline_id"` + // The username that the pipeline runs as. This is a read only value derived + // from the pipeline owner. + RunAsUserName types.String `tfsdk:"run_as_user_name"` + // The pipeline state. + State PipelineState `tfsdk:"state"` +} + +type PipelineTrigger struct { + Cron *CronTrigger `tfsdk:"cron"` + + Manual *ManualTrigger `tfsdk:"manual"` +} + +type SchemaSpec struct { + // Required. Destination catalog to store tables. + DestinationCatalog types.String `tfsdk:"destination_catalog"` + // Required. Destination schema to store tables in. Tables with the same + // name as the source tables are created in this destination schema. 
The + // pipeline fails If a table with the same name already exists. + DestinationSchema types.String `tfsdk:"destination_schema"` + // The source catalog name. Might be optional depending on the type of + // source. + SourceCatalog types.String `tfsdk:"source_catalog"` + // Required. Schema name in the source database. + SourceSchema types.String `tfsdk:"source_schema"` + // Configuration settings to control the ingestion of tables. These settings + // are applied to all tables in this schema and override the + // table_configuration defined in the ManagedIngestionPipelineDefinition + // object. + TableConfiguration *TableSpecificConfig `tfsdk:"table_configuration"` +} + +type Sequencing struct { + // A sequence number, unique and increasing within the control plane. + ControlPlaneSeqNo types.Int64 `tfsdk:"control_plane_seq_no"` + // the ID assigned by the data plane. + DataPlaneId *DataPlaneId `tfsdk:"data_plane_id"` +} + +type SerializedException struct { + // Runtime class of the exception + ClassName types.String `tfsdk:"class_name"` + // Exception message + Message types.String `tfsdk:"message"` + // Stack trace consisting of a list of stack frames + Stack []StackFrame `tfsdk:"stack"` +} + +type StackFrame struct { + // Class from which the method call originated + DeclaringClass types.String `tfsdk:"declaring_class"` + // File where the method is defined + FileName types.String `tfsdk:"file_name"` + // Line from which the method was called + LineNumber types.Int64 `tfsdk:"line_number"` + // Name of the method which was called + MethodName types.String `tfsdk:"method_name"` +} + +type StartUpdate struct { + Cause StartUpdateCause `tfsdk:"cause"` + // If true, this update will reset all tables before running. + FullRefresh types.Bool `tfsdk:"full_refresh"` + // A list of tables to update with fullRefresh. If both refresh_selection + // and full_refresh_selection are empty, this is a full graph update. Full + // Refresh on a table means that the states of the table will be reset + // before the refresh. + FullRefreshSelection []types.String `tfsdk:"full_refresh_selection"` + + PipelineId types.String `tfsdk:"-" url:"-"` + // A list of tables to update without fullRefresh. If both refresh_selection + // and full_refresh_selection are empty, this is a full graph update. Full + // Refresh on a table means that the states of the table will be reset + // before the refresh. + RefreshSelection []types.String `tfsdk:"refresh_selection"` + // If true, this update only validates the correctness of pipeline source + // code but does not materialize or publish any datasets. 
+ ValidateOnly types.Bool `tfsdk:"validate_only"` +} + +type StartUpdateCause string + +const StartUpdateCauseApiCall StartUpdateCause = `API_CALL` + +const StartUpdateCauseJobTask StartUpdateCause = `JOB_TASK` + +const StartUpdateCauseRetryOnFailure StartUpdateCause = `RETRY_ON_FAILURE` + +const StartUpdateCauseSchemaChange StartUpdateCause = `SCHEMA_CHANGE` + +const StartUpdateCauseServiceUpgrade StartUpdateCause = `SERVICE_UPGRADE` + +const StartUpdateCauseUserAction StartUpdateCause = `USER_ACTION` + +// String representation for [fmt.Print] +func (f *StartUpdateCause) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *StartUpdateCause) Set(v string) error { + switch v { + case `API_CALL`, `JOB_TASK`, `RETRY_ON_FAILURE`, `SCHEMA_CHANGE`, `SERVICE_UPGRADE`, `USER_ACTION`: + *f = StartUpdateCause(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "API_CALL", "JOB_TASK", "RETRY_ON_FAILURE", "SCHEMA_CHANGE", "SERVICE_UPGRADE", "USER_ACTION"`, v) + } +} + +// Type always returns StartUpdateCause to satisfy [pflag.Value] interface +func (f *StartUpdateCause) Type() string { + return "StartUpdateCause" +} + +type StartUpdateResponse struct { + UpdateId types.String `tfsdk:"update_id"` +} + +type StopPipelineResponse struct { +} + +// Stop a pipeline +type StopRequest struct { + PipelineId types.String `tfsdk:"-" url:"-"` +} + +type TableSpec struct { + // Required. Destination catalog to store table. + DestinationCatalog types.String `tfsdk:"destination_catalog"` + // Required. Destination schema to store table. + DestinationSchema types.String `tfsdk:"destination_schema"` + // Optional. Destination table name. The pipeline fails If a table with that + // name already exists. If not set, the source table name is used. + DestinationTable types.String `tfsdk:"destination_table"` + // Source catalog name. Might be optional depending on the type of source. + SourceCatalog types.String `tfsdk:"source_catalog"` + // Schema name in the source database. Might be optional depending on the + // type of source. + SourceSchema types.String `tfsdk:"source_schema"` + // Required. Table name in the source database. + SourceTable types.String `tfsdk:"source_table"` + // Configuration settings to control the ingestion of tables. These settings + // override the table_configuration defined in the + // ManagedIngestionPipelineDefinition object and the SchemaSpec. + TableConfiguration *TableSpecificConfig `tfsdk:"table_configuration"` +} + +type TableSpecificConfig struct { + // The primary key of the table used to apply changes. + PrimaryKeys []types.String `tfsdk:"primary_keys"` + // If true, formula fields defined in the table are included in the + // ingestion. This setting is only valid for the Salesforce connector + SalesforceIncludeFormulaFields types.Bool `tfsdk:"salesforce_include_formula_fields"` + // The SCD type to use to ingest the table. + ScdType TableSpecificConfigScdType `tfsdk:"scd_type"` +} + +// The SCD type to use to ingest the table. 
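+
+// Review sketch, not part of the generated output: list fields remain
+// go-native slices of tfsdk values, so they can be built with ordinary
+// literals. The helper and the values below are hypothetical.
+func exampleTableSpecificConfig() TableSpecificConfig {
+	return TableSpecificConfig{
+		PrimaryKeys:                    []types.String{types.StringValue("id")},
+		SalesforceIncludeFormulaFields: types.BoolValue(false),
+		ScdType:                        TableSpecificConfigScdTypeScdType2,
+	}
+}
+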
+type TableSpecificConfigScdType string + +const TableSpecificConfigScdTypeScdType1 TableSpecificConfigScdType = `SCD_TYPE_1` + +const TableSpecificConfigScdTypeScdType2 TableSpecificConfigScdType = `SCD_TYPE_2` + +// String representation for [fmt.Print] +func (f *TableSpecificConfigScdType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TableSpecificConfigScdType) Set(v string) error { + switch v { + case `SCD_TYPE_1`, `SCD_TYPE_2`: + *f = TableSpecificConfigScdType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "SCD_TYPE_1", "SCD_TYPE_2"`, v) + } +} + +// Type always returns TableSpecificConfigScdType to satisfy [pflag.Value] interface +func (f *TableSpecificConfigScdType) Type() string { + return "TableSpecificConfigScdType" +} + +type UpdateInfo struct { + // What triggered this update. + Cause UpdateInfoCause `tfsdk:"cause"` + // The ID of the cluster that the update is running on. + ClusterId types.String `tfsdk:"cluster_id"` + // The pipeline configuration with system defaults applied where unspecified + // by the user. Not returned by ListUpdates. + Config *PipelineSpec `tfsdk:"config"` + // The time when this update was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // If true, this update will reset all tables before running. + FullRefresh types.Bool `tfsdk:"full_refresh"` + // A list of tables to update with fullRefresh. If both refresh_selection + // and full_refresh_selection are empty, this is a full graph update. Full + // Refresh on a table means that the states of the table will be reset + // before the refresh. + FullRefreshSelection []types.String `tfsdk:"full_refresh_selection"` + // The ID of the pipeline. + PipelineId types.String `tfsdk:"pipeline_id"` + // A list of tables to update without fullRefresh. If both refresh_selection + // and full_refresh_selection are empty, this is a full graph update. Full + // Refresh on a table means that the states of the table will be reset + // before the refresh. + RefreshSelection []types.String `tfsdk:"refresh_selection"` + // The update state. + State UpdateInfoState `tfsdk:"state"` + // The ID of this update. + UpdateId types.String `tfsdk:"update_id"` + // If true, this update only validates the correctness of pipeline source + // code but does not materialize or publish any datasets. + ValidateOnly types.Bool `tfsdk:"validate_only"` +} + +// What triggered this update. 
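+
+// Review sketch, not part of the generated output: values are read back with
+// the framework's Value* accessors, and go-native slices can be ranged over
+// directly, which keeps conversion to the matching go-sdk structs simple. The
+// helper below is hypothetical.
+func exampleReadUpdateInfo(u UpdateInfo) (updateId string, fullRefresh bool, selection []string) {
+	for _, t := range u.RefreshSelection {
+		selection = append(selection, t.ValueString())
+	}
+	return u.UpdateId.ValueString(), u.FullRefresh.ValueBool(), selection
+}
+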
+type UpdateInfoCause string + +const UpdateInfoCauseApiCall UpdateInfoCause = `API_CALL` + +const UpdateInfoCauseJobTask UpdateInfoCause = `JOB_TASK` + +const UpdateInfoCauseRetryOnFailure UpdateInfoCause = `RETRY_ON_FAILURE` + +const UpdateInfoCauseSchemaChange UpdateInfoCause = `SCHEMA_CHANGE` + +const UpdateInfoCauseServiceUpgrade UpdateInfoCause = `SERVICE_UPGRADE` + +const UpdateInfoCauseUserAction UpdateInfoCause = `USER_ACTION` + +// String representation for [fmt.Print] +func (f *UpdateInfoCause) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateInfoCause) Set(v string) error { + switch v { + case `API_CALL`, `JOB_TASK`, `RETRY_ON_FAILURE`, `SCHEMA_CHANGE`, `SERVICE_UPGRADE`, `USER_ACTION`: + *f = UpdateInfoCause(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "API_CALL", "JOB_TASK", "RETRY_ON_FAILURE", "SCHEMA_CHANGE", "SERVICE_UPGRADE", "USER_ACTION"`, v) + } +} + +// Type always returns UpdateInfoCause to satisfy [pflag.Value] interface +func (f *UpdateInfoCause) Type() string { + return "UpdateInfoCause" +} + +// The update state. +type UpdateInfoState string + +const UpdateInfoStateCanceled UpdateInfoState = `CANCELED` + +const UpdateInfoStateCompleted UpdateInfoState = `COMPLETED` + +const UpdateInfoStateCreated UpdateInfoState = `CREATED` + +const UpdateInfoStateFailed UpdateInfoState = `FAILED` + +const UpdateInfoStateInitializing UpdateInfoState = `INITIALIZING` + +const UpdateInfoStateQueued UpdateInfoState = `QUEUED` + +const UpdateInfoStateResetting UpdateInfoState = `RESETTING` + +const UpdateInfoStateRunning UpdateInfoState = `RUNNING` + +const UpdateInfoStateSettingUpTables UpdateInfoState = `SETTING_UP_TABLES` + +const UpdateInfoStateStopping UpdateInfoState = `STOPPING` + +const UpdateInfoStateWaitingForResources UpdateInfoState = `WAITING_FOR_RESOURCES` + +// String representation for [fmt.Print] +func (f *UpdateInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateInfoState) Set(v string) error { + switch v { + case `CANCELED`, `COMPLETED`, `CREATED`, `FAILED`, `INITIALIZING`, `QUEUED`, `RESETTING`, `RUNNING`, `SETTING_UP_TABLES`, `STOPPING`, `WAITING_FOR_RESOURCES`: + *f = UpdateInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "COMPLETED", "CREATED", "FAILED", "INITIALIZING", "QUEUED", "RESETTING", "RUNNING", "SETTING_UP_TABLES", "STOPPING", "WAITING_FOR_RESOURCES"`, v) + } +} + +// Type always returns UpdateInfoState to satisfy [pflag.Value] interface +func (f *UpdateInfoState) Type() string { + return "UpdateInfoState" +} + +type UpdateStateInfo struct { + CreationTime types.String `tfsdk:"creation_time"` + + State UpdateStateInfoState `tfsdk:"state"` + + UpdateId types.String `tfsdk:"update_id"` +} + +type UpdateStateInfoState string + +const UpdateStateInfoStateCanceled UpdateStateInfoState = `CANCELED` + +const UpdateStateInfoStateCompleted UpdateStateInfoState = `COMPLETED` + +const UpdateStateInfoStateCreated UpdateStateInfoState = `CREATED` + +const UpdateStateInfoStateFailed UpdateStateInfoState = `FAILED` + +const UpdateStateInfoStateInitializing UpdateStateInfoState = `INITIALIZING` + +const UpdateStateInfoStateQueued UpdateStateInfoState = `QUEUED` + +const UpdateStateInfoStateResetting UpdateStateInfoState = `RESETTING` + +const UpdateStateInfoStateRunning UpdateStateInfoState = `RUNNING` + +const 
UpdateStateInfoStateSettingUpTables UpdateStateInfoState = `SETTING_UP_TABLES` + +const UpdateStateInfoStateStopping UpdateStateInfoState = `STOPPING` + +const UpdateStateInfoStateWaitingForResources UpdateStateInfoState = `WAITING_FOR_RESOURCES` + +// String representation for [fmt.Print] +func (f *UpdateStateInfoState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpdateStateInfoState) Set(v string) error { + switch v { + case `CANCELED`, `COMPLETED`, `CREATED`, `FAILED`, `INITIALIZING`, `QUEUED`, `RESETTING`, `RUNNING`, `SETTING_UP_TABLES`, `STOPPING`, `WAITING_FOR_RESOURCES`: + *f = UpdateStateInfoState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "COMPLETED", "CREATED", "FAILED", "INITIALIZING", "QUEUED", "RESETTING", "RUNNING", "SETTING_UP_TABLES", "STOPPING", "WAITING_FOR_RESOURCES"`, v) + } +} + +// Type always returns UpdateStateInfoState to satisfy [pflag.Value] interface +func (f *UpdateStateInfoState) Type() string { + return "UpdateStateInfoState" +} diff --git a/service/provisioning_tf/model.go b/service/provisioning_tf/model.go new file mode 100755 index 0000000000..d69d59f9f8 --- /dev/null +++ b/service/provisioning_tf/model.go @@ -0,0 +1,1103 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package provisioning_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AwsCredentials struct { + StsRole *StsRole `tfsdk:"sts_role"` +} + +type AwsKeyInfo struct { + // The AWS KMS key alias. + KeyAlias types.String `tfsdk:"key_alias"` + // The AWS KMS key's Amazon Resource Name (ARN). + KeyArn types.String `tfsdk:"key_arn"` + // The AWS KMS key region. + KeyRegion types.String `tfsdk:"key_region"` + // This field applies only if the `use_cases` property includes `STORAGE`. + // If this is set to `true` or omitted, the key is also used to encrypt + // cluster EBS volumes. If you do not want to use this key for encrypting + // EBS volumes, set to `false`. + ReuseKeyForClusterVolumes types.Bool `tfsdk:"reuse_key_for_cluster_volumes"` +} + +type AzureWorkspaceInfo struct { + // Azure Resource Group name + ResourceGroup types.String `tfsdk:"resource_group"` + // Azure Subscription ID + SubscriptionId types.String `tfsdk:"subscription_id"` +} + +// The general workspace configurations that are specific to cloud providers. +type CloudResourceContainer struct { + // The general workspace configurations that are specific to Google Cloud. + Gcp *CustomerFacingGcpCloudResourceContainer `tfsdk:"gcp"` +} + +type CreateAwsKeyInfo struct { + // The AWS KMS key alias. + KeyAlias types.String `tfsdk:"key_alias"` + // The AWS KMS key's Amazon Resource Name (ARN). Note that the key's AWS + // region is inferred from the ARN. + KeyArn types.String `tfsdk:"key_arn"` + // This field applies only if the `use_cases` property includes `STORAGE`. + // If this is set to `true` or omitted, the key is also used to encrypt + // cluster EBS volumes. 
To not use this key also for encrypting EBS volumes, + // set this to `false`. + ReuseKeyForClusterVolumes types.Bool `tfsdk:"reuse_key_for_cluster_volumes"` +} + +type CreateCredentialAwsCredentials struct { + StsRole *CreateCredentialStsRole `tfsdk:"sts_role"` +} + +type CreateCredentialRequest struct { + AwsCredentials CreateCredentialAwsCredentials `tfsdk:"aws_credentials"` + // The human-readable name of the credential configuration object. + CredentialsName types.String `tfsdk:"credentials_name"` +} + +type CreateCredentialStsRole struct { + // The Amazon Resource Name (ARN) of the cross account role. + RoleArn types.String `tfsdk:"role_arn"` +} + +type CreateCustomerManagedKeyRequest struct { + AwsKeyInfo *CreateAwsKeyInfo `tfsdk:"aws_key_info"` + + GcpKeyInfo *CreateGcpKeyInfo `tfsdk:"gcp_key_info"` + // The cases that the key can be used for. + UseCases []KeyUseCase `tfsdk:"use_cases"` +} + +type CreateGcpKeyInfo struct { + // The GCP KMS key's resource name + KmsKeyId types.String `tfsdk:"kms_key_id"` +} + +type CreateNetworkRequest struct { + // The Google Cloud specific information for this network (for example, the + // VPC ID, subnet ID, and secondary IP ranges). + GcpNetworkInfo *GcpNetworkInfo `tfsdk:"gcp_network_info"` + // The human-readable name of the network configuration. + NetworkName types.String `tfsdk:"network_name"` + // IDs of one to five security groups associated with this network. Security + // group IDs **cannot** be used in multiple network configurations. + SecurityGroupIds []types.String `tfsdk:"security_group_ids"` + // IDs of at least two subnets associated with this network. Subnet IDs + // **cannot** be used in multiple network configurations. + SubnetIds []types.String `tfsdk:"subnet_ids"` + // If specified, contains the VPC endpoints used to allow cluster + // communication from this VPC over [AWS PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ + VpcEndpoints *NetworkVpcEndpoints `tfsdk:"vpc_endpoints"` + // The ID of the VPC associated with this network. VPC IDs can be used in + // multiple network configurations. + VpcId types.String `tfsdk:"vpc_id"` +} + +type CreateStorageConfigurationRequest struct { + // Root S3 bucket information. + RootBucketInfo RootBucketInfo `tfsdk:"root_bucket_info"` + // The human-readable name of the storage configuration. + StorageConfigurationName types.String `tfsdk:"storage_configuration_name"` +} + +type CreateVpcEndpointRequest struct { + // The ID of the VPC endpoint object in AWS. + AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id"` + // The Google Cloud specific information for this Private Service Connect + // endpoint. + GcpVpcEndpointInfo *GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info"` + // The AWS region in which this VPC endpoint object exists. + Region types.String `tfsdk:"region"` + // The human-readable name of the storage configuration. + VpcEndpointName types.String `tfsdk:"vpc_endpoint_name"` +} + +type CreateWorkspaceRequest struct { + // The AWS region of the workspace's data plane. + AwsRegion types.String `tfsdk:"aws_region"` + // The cloud provider which the workspace uses. For Google Cloud workspaces, + // always set this field to `gcp`. + Cloud types.String `tfsdk:"cloud"` + // The general workspace configurations that are specific to cloud + // providers. + CloudResourceContainer *CloudResourceContainer `tfsdk:"cloud_resource_container"` + // ID of the workspace's credential configuration object. 
+ CredentialsId types.String `tfsdk:"credentials_id"` + // The custom tags key-value pairing that is attached to this workspace. The + // key-value pair is a string of utf-8 characters. The value can be an empty + // string, with maximum length of 255 characters. The key can be of maximum + // length of 127 characters, and cannot be empty. + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // The deployment name defines part of the subdomain for the workspace. The + // workspace URL for the web application and REST APIs is + // `.cloud.databricks.com`. For example, if the + // deployment name is `abcsales`, your workspace URL will be + // `https://abcsales.cloud.databricks.com`. Hyphens are allowed. This + // property supports only the set of characters that are allowed in a + // subdomain. + // + // To set this value, you must have a deployment name prefix. Contact your + // Databricks account team to add an account deployment name prefix to your + // account. + // + // Workspace deployment names follow the account prefix and a hyphen. For + // example, if your account's deployment prefix is `acme` and the workspace + // deployment name is `workspace-1`, the JSON response for the + // `deployment_name` field becomes `acme-workspace-1`. The workspace URL + // would be `acme-workspace-1.cloud.databricks.com`. + // + // You can also set the `deployment_name` to the reserved keyword `EMPTY` if + // you want the deployment name to only include the deployment prefix. For + // example, if your account's deployment prefix is `acme` and the workspace + // deployment name is `EMPTY`, the `deployment_name` becomes `acme` only and + // the workspace URL is `acme.cloud.databricks.com`. + // + // This value must be unique across all non-deleted deployments across all + // AWS regions. + // + // If a new workspace omits this property, the server generates a unique + // deployment name for you with the pattern `dbc-xxxxxxxx-xxxx`. + DeploymentName types.String `tfsdk:"deployment_name"` + // The network settings for the workspace. The configurations are only for + // Databricks-managed VPCs. It is ignored if you specify a customer-managed + // VPC in the `network_id` field.", All the IP range configurations must be + // mutually exclusive. An attempt to create a workspace fails if Databricks + // detects an IP range overlap. + // + // Specify custom IP ranges in CIDR format. The IP ranges for these fields + // must not overlap, and all IP addresses must be entirely within the + // following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`, + // `192.168.0.0/16`, and `240.0.0.0/4`. + // + // The sizes of these IP ranges affect the maximum number of nodes for the + // workspace. + // + // **Important**: Confirm the IP ranges used by your Databricks workspace + // before creating the workspace. You cannot change them after your + // workspace is deployed. If the IP address ranges for your Databricks are + // too small, IP exhaustion can occur, causing your Databricks jobs to fail. + // To determine the address range sizes that you need, Databricks provides a + // calculator as a Microsoft Excel spreadsheet. See [calculate subnet sizes + // for a new workspace]. + // + // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html + GcpManagedNetworkConfig *GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config"` + // The configurations for the GKE cluster of a Databricks workspace. 
+ GkeConfig *GkeConfig `tfsdk:"gke_config"` + // The Google Cloud region of the workspace data plane in your Google + // account. For example, `us-east4`. + Location types.String `tfsdk:"location"` + // The ID of the workspace's managed services encryption key configuration + // object. This is used to help protect and control access to the + // workspace's notebooks, secrets, Databricks SQL queries, and query + // history. The provided key configuration object property `use_cases` must + // contain `MANAGED_SERVICES`. + ManagedServicesCustomerManagedKeyId types.String `tfsdk:"managed_services_customer_managed_key_id"` + + NetworkId types.String `tfsdk:"network_id"` + // The pricing tier of the workspace. For pricing tier information, see [AWS + // Pricing]. + // + // [AWS Pricing]: https://databricks.com/product/aws-pricing + PricingTier PricingTier `tfsdk:"pricing_tier"` + // ID of the workspace's private access settings object. Only used for + // PrivateLink. This ID must be specified for customers using [AWS + // PrivateLink] for either front-end (user-to-workspace connection), + // back-end (data plane to control plane connection), or both connection + // types. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink].", + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + PrivateAccessSettingsId types.String `tfsdk:"private_access_settings_id"` + // The ID of the workspace's storage configuration object. + StorageConfigurationId types.String `tfsdk:"storage_configuration_id"` + // The ID of the workspace's storage encryption key configuration object. + // This is used to encrypt the workspace's root S3 bucket (root DBFS and + // system data) and, optionally, cluster EBS volumes. The provided key + // configuration object property `use_cases` must contain `STORAGE`. + StorageCustomerManagedKeyId types.String `tfsdk:"storage_customer_managed_key_id"` + // The workspace's human-readable name. + WorkspaceName types.String `tfsdk:"workspace_name"` +} + +type Credential struct { + // The Databricks account ID that hosts the credential. + AccountId types.String `tfsdk:"account_id"` + + AwsCredentials *AwsCredentials `tfsdk:"aws_credentials"` + // Time in epoch milliseconds when the credential was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // Databricks credential configuration ID. + CredentialsId types.String `tfsdk:"credentials_id"` + // The human-readable name of the credential configuration object. + CredentialsName types.String `tfsdk:"credentials_name"` +} + +// The custom tags key-value pairing that is attached to this workspace. The +// key-value pair is a string of utf-8 characters. The value can be an empty +// string, with maximum length of 255 characters. The key can be of maximum +// length of 127 characters, and cannot be empty. +type CustomTags map[string]types.String + +// The general workspace configurations that are specific to Google Cloud. +type CustomerFacingGcpCloudResourceContainer struct { + // The Google Cloud project ID, which the workspace uses to instantiate + // cloud resources for your workspace. + ProjectId types.String `tfsdk:"project_id"` +} + +type CustomerManagedKey struct { + // The Databricks account ID that holds the customer-managed key. 
+ AccountId types.String `tfsdk:"account_id"` + + AwsKeyInfo *AwsKeyInfo `tfsdk:"aws_key_info"` + // Time in epoch milliseconds when the customer key was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // ID of the encryption key configuration object. + CustomerManagedKeyId types.String `tfsdk:"customer_managed_key_id"` + + GcpKeyInfo *GcpKeyInfo `tfsdk:"gcp_key_info"` + // The cases that the key can be used for. + UseCases []KeyUseCase `tfsdk:"use_cases"` +} + +// Delete credential configuration +type DeleteCredentialRequest struct { + // Databricks Account API credential configuration ID + CredentialsId types.String `tfsdk:"-" url:"-"` +} + +// Delete encryption key configuration +type DeleteEncryptionKeyRequest struct { + // Databricks encryption key configuration ID. + CustomerManagedKeyId types.String `tfsdk:"-" url:"-"` +} + +// Delete a network configuration +type DeleteNetworkRequest struct { + // Databricks Account API network configuration ID. + NetworkId types.String `tfsdk:"-" url:"-"` +} + +// Delete a private access settings object +type DeletePrivateAccesRequest struct { + // Databricks Account API private access settings ID. + PrivateAccessSettingsId types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete storage configuration +type DeleteStorageRequest struct { + // Databricks Account API storage configuration ID. + StorageConfigurationId types.String `tfsdk:"-" url:"-"` +} + +// Delete VPC endpoint configuration +type DeleteVpcEndpointRequest struct { + // Databricks VPC endpoint ID. + VpcEndpointId types.String `tfsdk:"-" url:"-"` +} + +// Delete a workspace +type DeleteWorkspaceRequest struct { + // Workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +// This enumeration represents the type of Databricks VPC [endpoint service] +// that was used when creating this VPC endpoint. +// +// [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html +type EndpointUseCase string + +const EndpointUseCaseDataplaneRelayAccess EndpointUseCase = `DATAPLANE_RELAY_ACCESS` + +const EndpointUseCaseWorkspaceAccess EndpointUseCase = `WORKSPACE_ACCESS` + +// String representation for [fmt.Print] +func (f *EndpointUseCase) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointUseCase) Set(v string) error { + switch v { + case `DATAPLANE_RELAY_ACCESS`, `WORKSPACE_ACCESS`: + *f = EndpointUseCase(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATAPLANE_RELAY_ACCESS", "WORKSPACE_ACCESS"`, v) + } +} + +// Type always returns EndpointUseCase to satisfy [pflag.Value] interface +func (f *EndpointUseCase) Type() string { + return "EndpointUseCase" +} + +// The AWS resource associated with this error: credentials, VPC, subnet, +// security group, or network ACL. 
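+
+// Review sketch, not part of the generated output: nested request messages
+// compose as plain structs and pointers, with only scalar fields switching to
+// tfsdk value types. The helper and the ARN below are hypothetical.
+func exampleCreateCredentialRequest() CreateCredentialRequest {
+	return CreateCredentialRequest{
+		CredentialsName: types.StringValue("my-credentials"),
+		AwsCredentials: CreateCredentialAwsCredentials{
+			StsRole: &CreateCredentialStsRole{
+				RoleArn: types.StringValue("arn:aws:iam::123456789012:role/cross-account"),
+			},
+		},
+	}
+}
+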
+type ErrorType string + +const ErrorTypeCredentials ErrorType = `credentials` + +const ErrorTypeNetworkAcl ErrorType = `networkAcl` + +const ErrorTypeSecurityGroup ErrorType = `securityGroup` + +const ErrorTypeSubnet ErrorType = `subnet` + +const ErrorTypeVpc ErrorType = `vpc` + +// String representation for [fmt.Print] +func (f *ErrorType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ErrorType) Set(v string) error { + switch v { + case `credentials`, `networkAcl`, `securityGroup`, `subnet`, `vpc`: + *f = ErrorType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "credentials", "networkAcl", "securityGroup", "subnet", "vpc"`, v) + } +} + +// Type always returns ErrorType to satisfy [pflag.Value] interface +func (f *ErrorType) Type() string { + return "ErrorType" +} + +type GcpKeyInfo struct { + // The GCP KMS key's resource name + KmsKeyId types.String `tfsdk:"kms_key_id"` +} + +// The network settings for the workspace. The configurations are only for +// Databricks-managed VPCs. It is ignored if you specify a customer-managed VPC +// in the `network_id` field.", All the IP range configurations must be mutually +// exclusive. An attempt to create a workspace fails if Databricks detects an IP +// range overlap. +// +// Specify custom IP ranges in CIDR format. The IP ranges for these fields must +// not overlap, and all IP addresses must be entirely within the following +// ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`, `192.168.0.0/16`, and +// `240.0.0.0/4`. +// +// The sizes of these IP ranges affect the maximum number of nodes for the +// workspace. +// +// **Important**: Confirm the IP ranges used by your Databricks workspace before +// creating the workspace. You cannot change them after your workspace is +// deployed. If the IP address ranges for your Databricks are too small, IP +// exhaustion can occur, causing your Databricks jobs to fail. To determine the +// address range sizes that you need, Databricks provides a calculator as a +// Microsoft Excel spreadsheet. See [calculate subnet sizes for a new +// workspace]. +// +// [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html +type GcpManagedNetworkConfig struct { + // The IP range from which to allocate GKE cluster pods. No bigger than `/9` + // and no smaller than `/21`. + GkeClusterPodIpRange types.String `tfsdk:"gke_cluster_pod_ip_range"` + // The IP range from which to allocate GKE cluster services. No bigger than + // `/16` and no smaller than `/27`. + GkeClusterServiceIpRange types.String `tfsdk:"gke_cluster_service_ip_range"` + // The IP range from which to allocate GKE cluster nodes. No bigger than + // `/9` and no smaller than `/29`. + SubnetCidr types.String `tfsdk:"subnet_cidr"` +} + +// The Google Cloud specific information for this network (for example, the VPC +// ID, subnet ID, and secondary IP ranges). +type GcpNetworkInfo struct { + // The Google Cloud project ID of the VPC network. + NetworkProjectId types.String `tfsdk:"network_project_id"` + // The name of the secondary IP range for pods. A Databricks-managed GKE + // cluster uses this IP range for its pods. This secondary IP range can be + // used by only one workspace. + PodIpRangeName types.String `tfsdk:"pod_ip_range_name"` + // The name of the secondary IP range for services. A Databricks-managed GKE + // cluster uses this IP range for its services. 
This secondary IP range can + // be used by only one workspace. + ServiceIpRangeName types.String `tfsdk:"service_ip_range_name"` + // The ID of the subnet associated with this network. + SubnetId types.String `tfsdk:"subnet_id"` + // The Google Cloud region of the workspace data plane (for example, + // `us-east4`). + SubnetRegion types.String `tfsdk:"subnet_region"` + // The ID of the VPC associated with this network. VPC IDs can be used in + // multiple network configurations. + VpcId types.String `tfsdk:"vpc_id"` +} + +// The Google Cloud specific information for this Private Service Connect +// endpoint. +type GcpVpcEndpointInfo struct { + // Region of the PSC endpoint. + EndpointRegion types.String `tfsdk:"endpoint_region"` + // The Google Cloud project ID of the VPC network where the PSC connection + // resides. + ProjectId types.String `tfsdk:"project_id"` + // The unique ID of this PSC connection. + PscConnectionId types.String `tfsdk:"psc_connection_id"` + // The name of the PSC endpoint in the Google Cloud project. + PscEndpointName types.String `tfsdk:"psc_endpoint_name"` + // The service attachment this PSC connection connects to. + ServiceAttachmentId types.String `tfsdk:"service_attachment_id"` +} + +// Get credential configuration +type GetCredentialRequest struct { + // Databricks Account API credential configuration ID + CredentialsId types.String `tfsdk:"-" url:"-"` +} + +// Get encryption key configuration +type GetEncryptionKeyRequest struct { + // Databricks encryption key configuration ID. + CustomerManagedKeyId types.String `tfsdk:"-" url:"-"` +} + +// Get a network configuration +type GetNetworkRequest struct { + // Databricks Account API network configuration ID. + NetworkId types.String `tfsdk:"-" url:"-"` +} + +// Get a private access settings object +type GetPrivateAccesRequest struct { + // Databricks Account API private access settings ID. + PrivateAccessSettingsId types.String `tfsdk:"-" url:"-"` +} + +// Get storage configuration +type GetStorageRequest struct { + // Databricks Account API storage configuration ID. + StorageConfigurationId types.String `tfsdk:"-" url:"-"` +} + +// Get a VPC endpoint configuration +type GetVpcEndpointRequest struct { + // Databricks VPC endpoint ID. + VpcEndpointId types.String `tfsdk:"-" url:"-"` +} + +// Get a workspace +type GetWorkspaceRequest struct { + // Workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +// The configurations for the GKE cluster of a Databricks workspace. +type GkeConfig struct { + // Specifies the network connectivity types for the GKE nodes and the GKE + // master network. + // + // Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the + // workspace. The GKE nodes will not have public IPs. + // + // Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of + // a public GKE cluster have public IP addresses. + ConnectivityType GkeConfigConnectivityType `tfsdk:"connectivity_type"` + // The IP range from which to allocate GKE cluster master resources. This + // field will be ignored if GKE private cluster is not enabled. + // + // It must be exactly as big as `/28`. + MasterIpRange types.String `tfsdk:"master_ip_range"` +} + +// Specifies the network connectivity types for the GKE nodes and the GKE master +// network. +// +// Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the +// workspace. The GKE nodes will not have public IPs. +// +// Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. 
The nodes of a +// public GKE cluster have public IP addresses. +type GkeConfigConnectivityType string + +const GkeConfigConnectivityTypePrivateNodePublicMaster GkeConfigConnectivityType = `PRIVATE_NODE_PUBLIC_MASTER` + +const GkeConfigConnectivityTypePublicNodePublicMaster GkeConfigConnectivityType = `PUBLIC_NODE_PUBLIC_MASTER` + +// String representation for [fmt.Print] +func (f *GkeConfigConnectivityType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GkeConfigConnectivityType) Set(v string) error { + switch v { + case `PRIVATE_NODE_PUBLIC_MASTER`, `PUBLIC_NODE_PUBLIC_MASTER`: + *f = GkeConfigConnectivityType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PRIVATE_NODE_PUBLIC_MASTER", "PUBLIC_NODE_PUBLIC_MASTER"`, v) + } +} + +// Type always returns GkeConfigConnectivityType to satisfy [pflag.Value] interface +func (f *GkeConfigConnectivityType) Type() string { + return "GkeConfigConnectivityType" +} + +// Possible values are: * `MANAGED_SERVICES`: Encrypts notebook and secret data +// in the control plane * `STORAGE`: Encrypts the workspace's root S3 bucket +// (root DBFS and system data) and, optionally, cluster EBS volumes. +type KeyUseCase string + +// Encrypts notebook and secret data in the control plane +const KeyUseCaseManagedServices KeyUseCase = `MANAGED_SERVICES` + +// Encrypts the workspace's root S3 bucket (root DBFS and system data) and, +// optionally, cluster EBS volumes. +const KeyUseCaseStorage KeyUseCase = `STORAGE` + +// String representation for [fmt.Print] +func (f *KeyUseCase) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *KeyUseCase) Set(v string) error { + switch v { + case `MANAGED_SERVICES`, `STORAGE`: + *f = KeyUseCase(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MANAGED_SERVICES", "STORAGE"`, v) + } +} + +// Type always returns KeyUseCase to satisfy [pflag.Value] interface +func (f *KeyUseCase) Type() string { + return "KeyUseCase" +} + +type Network struct { + // The Databricks account ID associated with this network configuration. + AccountId types.String `tfsdk:"account_id"` + // Time in epoch milliseconds when the network was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // Array of error messages about the network configuration. + ErrorMessages []NetworkHealth `tfsdk:"error_messages"` + // The Google Cloud specific information for this network (for example, the + // VPC ID, subnet ID, and secondary IP ranges). + GcpNetworkInfo *GcpNetworkInfo `tfsdk:"gcp_network_info"` + // The Databricks network configuration ID. + NetworkId types.String `tfsdk:"network_id"` + // The human-readable name of the network configuration. + NetworkName types.String `tfsdk:"network_name"` + + SecurityGroupIds []types.String `tfsdk:"security_group_ids"` + + SubnetIds []types.String `tfsdk:"subnet_ids"` + // If specified, contains the VPC endpoints used to allow cluster + // communication from this VPC over [AWS PrivateLink]. + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ + VpcEndpoints *NetworkVpcEndpoints `tfsdk:"vpc_endpoints"` + // The ID of the VPC associated with this network configuration. VPC IDs can + // be used in multiple networks. + VpcId types.String `tfsdk:"vpc_id"` + // The status of this network configuration object in terms of its use in a + // workspace: * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: + // Broken. 
* `WARNED`: Warned. + VpcStatus VpcStatus `tfsdk:"vpc_status"` + // Array of warning messages about the network configuration. + WarningMessages []NetworkWarning `tfsdk:"warning_messages"` + // Workspace ID associated with this network configuration. + WorkspaceId types.Int64 `tfsdk:"workspace_id"` +} + +type NetworkHealth struct { + // Details of the error. + ErrorMessage types.String `tfsdk:"error_message"` + // The AWS resource associated with this error: credentials, VPC, subnet, + // security group, or network ACL. + ErrorType ErrorType `tfsdk:"error_type"` +} + +// If specified, contains the VPC endpoints used to allow cluster communication +// from this VPC over [AWS PrivateLink]. +// +// [AWS PrivateLink]: https://aws.amazon.com/privatelink/ +type NetworkVpcEndpoints struct { + // The VPC endpoint ID used by this network to access the Databricks secure + // cluster connectivity relay. + DataplaneRelay []types.String `tfsdk:"dataplane_relay"` + // The VPC endpoint ID used by this network to access the Databricks REST + // API. + RestApi []types.String `tfsdk:"rest_api"` +} + +type NetworkWarning struct { + // Details of the warning. + WarningMessage types.String `tfsdk:"warning_message"` + // The AWS resource associated with this warning: a subnet or a security + // group. + WarningType WarningType `tfsdk:"warning_type"` +} + +// The pricing tier of the workspace. For pricing tier information, see [AWS +// Pricing]. +// +// [AWS Pricing]: https://databricks.com/product/aws-pricing +type PricingTier string + +const PricingTierCommunityEdition PricingTier = `COMMUNITY_EDITION` + +const PricingTierDedicated PricingTier = `DEDICATED` + +const PricingTierEnterprise PricingTier = `ENTERPRISE` + +const PricingTierPremium PricingTier = `PREMIUM` + +const PricingTierStandard PricingTier = `STANDARD` + +const PricingTierUnknown PricingTier = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *PricingTier) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PricingTier) Set(v string) error { + switch v { + case `COMMUNITY_EDITION`, `DEDICATED`, `ENTERPRISE`, `PREMIUM`, `STANDARD`, `UNKNOWN`: + *f = PricingTier(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COMMUNITY_EDITION", "DEDICATED", "ENTERPRISE", "PREMIUM", "STANDARD", "UNKNOWN"`, v) + } +} + +// Type always returns PricingTier to satisfy [pflag.Value] interface +func (f *PricingTier) Type() string { + return "PricingTier" +} + +// The private access level controls which VPC endpoints can connect to the UI +// or API of any workspace that attaches this private access settings object. * +// `ACCOUNT` level access (the default) allows only VPC endpoints that are +// registered in your Databricks account connect to your workspace. * `ENDPOINT` +// level access allows only specified VPC endpoints connect to your workspace. +// For details, see `allowed_vpc_endpoint_ids`. 
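+//
+// Illustrative sketch, not part of the generated output: a private access
+// settings value can be built from one of the constants below together with
+// the plugin framework's value constructors (assuming terraform-plugin-framework
+// v1's types.StringValue/types.BoolValue; the names and values are placeholders).
+//
+//	settings := PrivateAccessSettings{
+//		PrivateAccessLevel:        PrivateAccessLevelAccount,
+//		PrivateAccessSettingsName: types.StringValue("pas-example"),
+//		Region:                    types.StringValue("us-west-2"),
+//		PublicAccessEnabled:       types.BoolValue(false),
+//	}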
+type PrivateAccessLevel string + +const PrivateAccessLevelAccount PrivateAccessLevel = `ACCOUNT` + +const PrivateAccessLevelEndpoint PrivateAccessLevel = `ENDPOINT` + +// String representation for [fmt.Print] +func (f *PrivateAccessLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PrivateAccessLevel) Set(v string) error { + switch v { + case `ACCOUNT`, `ENDPOINT`: + *f = PrivateAccessLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACCOUNT", "ENDPOINT"`, v) + } +} + +// Type always returns PrivateAccessLevel to satisfy [pflag.Value] interface +func (f *PrivateAccessLevel) Type() string { + return "PrivateAccessLevel" +} + +type PrivateAccessSettings struct { + // The Databricks account ID that hosts the credential. + AccountId types.String `tfsdk:"account_id"` + // An array of Databricks VPC endpoint IDs. + AllowedVpcEndpointIds []types.String `tfsdk:"allowed_vpc_endpoint_ids"` + // The private access level controls which VPC endpoints can connect to the + // UI or API of any workspace that attaches this private access settings + // object. * `ACCOUNT` level access (the default) allows only VPC endpoints + // that are registered in your Databricks account connect to your workspace. + // * `ENDPOINT` level access allows only specified VPC endpoints connect to + // your workspace. For details, see `allowed_vpc_endpoint_ids`. + PrivateAccessLevel PrivateAccessLevel `tfsdk:"private_access_level"` + // Databricks private access settings ID. + PrivateAccessSettingsId types.String `tfsdk:"private_access_settings_id"` + // The human-readable name of the private access settings object. + PrivateAccessSettingsName types.String `tfsdk:"private_access_settings_name"` + // Determines if the workspace can be accessed over public internet. For + // fully private workspaces, you can optionally specify `false`, but only if + // you implement both the front-end and the back-end PrivateLink + // connections. Otherwise, specify `true`, which means that public access is + // enabled. + PublicAccessEnabled types.Bool `tfsdk:"public_access_enabled"` + // The cloud region for workspaces attached to this private access settings + // object. + Region types.String `tfsdk:"region"` +} + +type ReplaceResponse struct { +} + +// Root S3 bucket information. +type RootBucketInfo struct { + // The name of the S3 bucket. + BucketName types.String `tfsdk:"bucket_name"` +} + +type StorageConfiguration struct { + // The Databricks account ID that hosts the credential. + AccountId types.String `tfsdk:"account_id"` + // Time in epoch milliseconds when the storage configuration was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // Root S3 bucket information. + RootBucketInfo *RootBucketInfo `tfsdk:"root_bucket_info"` + // Databricks storage configuration ID. + StorageConfigurationId types.String `tfsdk:"storage_configuration_id"` + // The human-readable name of the storage configuration. + StorageConfigurationName types.String `tfsdk:"storage_configuration_name"` +} + +type StsRole struct { + // The external ID that needs to be trusted by the cross-account role. This + // is always your Databricks account ID. + ExternalId types.String `tfsdk:"external_id"` + // The Amazon Resource Name (ARN) of the cross account role. 
+ RoleArn types.String `tfsdk:"role_arn"` +} + +type UpdateResponse struct { +} + +type UpdateWorkspaceRequest struct { + // The AWS region of the workspace's data plane (for example, `us-west-2`). + // This parameter is available only for updating failed workspaces. + AwsRegion types.String `tfsdk:"aws_region"` + // ID of the workspace's credential configuration object. This parameter is + // available for updating both failed and running workspaces. + CredentialsId types.String `tfsdk:"credentials_id"` + // The custom tags key-value pairing that is attached to this workspace. The + // key-value pair is a string of utf-8 characters. The value can be an empty + // string, with maximum length of 255 characters. The key can be of maximum + // length of 127 characters, and cannot be empty. + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // The ID of the workspace's managed services encryption key configuration + // object. This parameter is available only for updating failed workspaces. + ManagedServicesCustomerManagedKeyId types.String `tfsdk:"managed_services_customer_managed_key_id"` + + NetworkConnectivityConfigId types.String `tfsdk:"network_connectivity_config_id"` + // The ID of the workspace's network configuration object. Used only if you + // already use a customer-managed VPC. For failed workspaces only, you can + // switch from a Databricks-managed VPC to a customer-managed VPC by + // updating the workspace to add a network configuration ID. + NetworkId types.String `tfsdk:"network_id"` + // The ID of the workspace's storage configuration object. This parameter is + // available only for updating failed workspaces. + StorageConfigurationId types.String `tfsdk:"storage_configuration_id"` + // The ID of the key configuration object for workspace storage. This + // parameter is available for updating both failed and running workspaces. + StorageCustomerManagedKeyId types.String `tfsdk:"storage_customer_managed_key_id"` + // Workspace ID. + WorkspaceId types.Int64 `tfsdk:"-" url:"-"` +} + +type UpsertPrivateAccessSettingsRequest struct { + // An array of Databricks VPC endpoint IDs. This is the Databricks ID that + // is returned when registering the VPC endpoint configuration in your + // Databricks account. This is not the ID of the VPC endpoint in AWS. + // + // Only used when `private_access_level` is set to `ENDPOINT`. This is an + // allow list of VPC endpoints that in your account that can connect to your + // workspace over AWS PrivateLink. + // + // If hybrid access to your workspace is enabled by setting + // `public_access_enabled` to `true`, this control only works for + // PrivateLink connections. To control how your workspace is accessed via + // public internet, see [IP access lists]. + // + // [IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html + AllowedVpcEndpointIds []types.String `tfsdk:"allowed_vpc_endpoint_ids"` + // The private access level controls which VPC endpoints can connect to the + // UI or API of any workspace that attaches this private access settings + // object. * `ACCOUNT` level access (the default) allows only VPC endpoints + // that are registered in your Databricks account connect to your workspace. + // * `ENDPOINT` level access allows only specified VPC endpoints connect to + // your workspace. For details, see `allowed_vpc_endpoint_ids`. + PrivateAccessLevel PrivateAccessLevel `tfsdk:"private_access_level"` + // Databricks Account API private access settings ID. 
+ PrivateAccessSettingsId types.String `tfsdk:"-" url:"-"` + // The human-readable name of the private access settings object. + PrivateAccessSettingsName types.String `tfsdk:"private_access_settings_name"` + // Determines if the workspace can be accessed over public internet. For + // fully private workspaces, you can optionally specify `false`, but only if + // you implement both the front-end and the back-end PrivateLink + // connections. Otherwise, specify `true`, which means that public access is + // enabled. + PublicAccessEnabled types.Bool `tfsdk:"public_access_enabled"` + // The cloud region for workspaces associated with this private access + // settings object. + Region types.String `tfsdk:"region"` +} + +type VpcEndpoint struct { + // The Databricks account ID that hosts the VPC endpoint configuration. + AccountId types.String `tfsdk:"account_id"` + // The AWS Account in which the VPC endpoint object exists. + AwsAccountId types.String `tfsdk:"aws_account_id"` + // The ID of the Databricks [endpoint service] that this VPC endpoint is + // connected to. For a list of endpoint service IDs for each supported AWS + // region, see the [Databricks PrivateLink documentation]. + // + // [Databricks PrivateLink documentation]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + // [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html + AwsEndpointServiceId types.String `tfsdk:"aws_endpoint_service_id"` + // The ID of the VPC endpoint object in AWS. + AwsVpcEndpointId types.String `tfsdk:"aws_vpc_endpoint_id"` + // The Google Cloud specific information for this Private Service Connect + // endpoint. + GcpVpcEndpointInfo *GcpVpcEndpointInfo `tfsdk:"gcp_vpc_endpoint_info"` + // The AWS region in which this VPC endpoint object exists. + Region types.String `tfsdk:"region"` + // The current state (such as `available` or `rejected`) of the VPC + // endpoint. Derived from AWS. For the full set of values, see [AWS + // DescribeVpcEndpoint documentation]. + // + // [AWS DescribeVpcEndpoint documentation]: https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-vpc-endpoints.html + State types.String `tfsdk:"state"` + // This enumeration represents the type of Databricks VPC [endpoint service] + // that was used when creating this VPC endpoint. + // + // [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html + UseCase EndpointUseCase `tfsdk:"use_case"` + // Databricks VPC endpoint ID. This is the Databricks-specific name of the + // VPC endpoint. Do not confuse this with the `aws_vpc_endpoint_id`, which + // is the ID within AWS of the VPC endpoint. + VpcEndpointId types.String `tfsdk:"vpc_endpoint_id"` + // The human-readable name of the storage configuration. + VpcEndpointName types.String `tfsdk:"vpc_endpoint_name"` +} + +// The status of this network configuration object in terms of its use in a +// workspace: * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: Broken. +// * `WARNED`: Warned. +type VpcStatus string + +// Broken. +const VpcStatusBroken VpcStatus = `BROKEN` + +// Unattached. +const VpcStatusUnattached VpcStatus = `UNATTACHED` + +// Valid. +const VpcStatusValid VpcStatus = `VALID` + +// Warned. 
+const VpcStatusWarned VpcStatus = `WARNED` + +// String representation for [fmt.Print] +func (f *VpcStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *VpcStatus) Set(v string) error { + switch v { + case `BROKEN`, `UNATTACHED`, `VALID`, `WARNED`: + *f = VpcStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BROKEN", "UNATTACHED", "VALID", "WARNED"`, v) + } +} + +// Type always returns VpcStatus to satisfy [pflag.Value] interface +func (f *VpcStatus) Type() string { + return "VpcStatus" +} + +// The AWS resource associated with this warning: a subnet or a security group. +type WarningType string + +const WarningTypeSecurityGroup WarningType = `securityGroup` + +const WarningTypeSubnet WarningType = `subnet` + +// String representation for [fmt.Print] +func (f *WarningType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WarningType) Set(v string) error { + switch v { + case `securityGroup`, `subnet`: + *f = WarningType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "securityGroup", "subnet"`, v) + } +} + +// Type always returns WarningType to satisfy [pflag.Value] interface +func (f *WarningType) Type() string { + return "WarningType" +} + +type Workspace struct { + // Databricks account ID. + AccountId types.String `tfsdk:"account_id"` + // The AWS region of the workspace data plane (for example, `us-west-2`). + AwsRegion types.String `tfsdk:"aws_region"` + + AzureWorkspaceInfo *AzureWorkspaceInfo `tfsdk:"azure_workspace_info"` + // The cloud name. This field always has the value `gcp`. + Cloud types.String `tfsdk:"cloud"` + // The general workspace configurations that are specific to cloud + // providers. + CloudResourceContainer *CloudResourceContainer `tfsdk:"cloud_resource_container"` + // Time in epoch milliseconds when the workspace was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // ID of the workspace's credential configuration object. + CredentialsId types.String `tfsdk:"credentials_id"` + // The custom tags key-value pairing that is attached to this workspace. The + // key-value pair is a string of utf-8 characters. The value can be an empty + // string, with maximum length of 255 characters. The key can be of maximum + // length of 127 characters, and cannot be empty. + CustomTags map[string]types.String `tfsdk:"custom_tags"` + // The deployment name defines part of the subdomain for the workspace. The + // workspace URL for web application and REST APIs is + // `.cloud.databricks.com`. + // + // This value must be unique across all non-deleted deployments across all + // AWS regions. + DeploymentName types.String `tfsdk:"deployment_name"` + // The network settings for the workspace. The configurations are only for + // Databricks-managed VPCs. It is ignored if you specify a customer-managed + // VPC in the `network_id` field.", All the IP range configurations must be + // mutually exclusive. An attempt to create a workspace fails if Databricks + // detects an IP range overlap. + // + // Specify custom IP ranges in CIDR format. The IP ranges for these fields + // must not overlap, and all IP addresses must be entirely within the + // following ranges: `10.0.0.0/8`, `100.64.0.0/10`, `172.16.0.0/12`, + // `192.168.0.0/16`, and `240.0.0.0/4`. + // + // The sizes of these IP ranges affect the maximum number of nodes for the + // workspace. 
+ // + // **Important**: Confirm the IP ranges used by your Databricks workspace + // before creating the workspace. You cannot change them after your + // workspace is deployed. If the IP address ranges for your Databricks are + // too small, IP exhaustion can occur, causing your Databricks jobs to fail. + // To determine the address range sizes that you need, Databricks provides a + // calculator as a Microsoft Excel spreadsheet. See [calculate subnet sizes + // for a new workspace]. + // + // [calculate subnet sizes for a new workspace]: https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html + GcpManagedNetworkConfig *GcpManagedNetworkConfig `tfsdk:"gcp_managed_network_config"` + // The configurations for the GKE cluster of a Databricks workspace. + GkeConfig *GkeConfig `tfsdk:"gke_config"` + // The Google Cloud region of the workspace data plane in your Google + // account (for example, `us-east4`). + Location types.String `tfsdk:"location"` + // ID of the key configuration for encrypting managed services. + ManagedServicesCustomerManagedKeyId types.String `tfsdk:"managed_services_customer_managed_key_id"` + // The network configuration ID that is attached to the workspace. This + // field is available only if the network is a customer-managed network. + NetworkId types.String `tfsdk:"network_id"` + // The pricing tier of the workspace. For pricing tier information, see [AWS + // Pricing]. + // + // [AWS Pricing]: https://databricks.com/product/aws-pricing + PricingTier PricingTier `tfsdk:"pricing_tier"` + // ID of the workspace's private access settings object. Only used for + // PrivateLink. You must specify this ID if you are using [AWS PrivateLink] + // for either front-end (user-to-workspace connection), back-end (data plane + // to control plane connection), or both connection types. + // + // Before configuring PrivateLink, read the [Databricks article about + // PrivateLink].", + // + // [AWS PrivateLink]: https://aws.amazon.com/privatelink/ + // [Databricks article about PrivateLink]: https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html + PrivateAccessSettingsId types.String `tfsdk:"private_access_settings_id"` + // ID of the workspace's storage configuration object. + StorageConfigurationId types.String `tfsdk:"storage_configuration_id"` + // ID of the key configuration for encrypting workspace storage. + StorageCustomerManagedKeyId types.String `tfsdk:"storage_customer_managed_key_id"` + // A unique integer ID for the workspace + WorkspaceId types.Int64 `tfsdk:"workspace_id"` + // The human-readable name of the workspace. + WorkspaceName types.String `tfsdk:"workspace_name"` + // The status of the workspace. For workspace creation, usually it is set to + // `PROVISIONING` initially. Continue to check the status until the status + // is `RUNNING`. + WorkspaceStatus WorkspaceStatus `tfsdk:"workspace_status"` + // Message describing the current workspace status. + WorkspaceStatusMessage types.String `tfsdk:"workspace_status_message"` +} + +// The status of the workspace. For workspace creation, usually it is set to +// `PROVISIONING` initially. Continue to check the status until the status is +// `RUNNING`. 
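+//
+// Illustrative sketch, not part of the generated output: for a Workspace
+// value ws (hypothetical), callers typically branch on the constants below:
+//
+//	switch ws.WorkspaceStatus {
+//	case WorkspaceStatusRunning:
+//		// the workspace is ready to use
+//	case WorkspaceStatusFailed, WorkspaceStatusBanned:
+//		// surface ws.WorkspaceStatusMessage as an error
+//	default:
+//		// PROVISIONING, CANCELLING or NOT_PROVISIONED: keep polling
+//	}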
+type WorkspaceStatus string + +const WorkspaceStatusBanned WorkspaceStatus = `BANNED` + +const WorkspaceStatusCancelling WorkspaceStatus = `CANCELLING` + +const WorkspaceStatusFailed WorkspaceStatus = `FAILED` + +const WorkspaceStatusNotProvisioned WorkspaceStatus = `NOT_PROVISIONED` + +const WorkspaceStatusProvisioning WorkspaceStatus = `PROVISIONING` + +const WorkspaceStatusRunning WorkspaceStatus = `RUNNING` + +// String representation for [fmt.Print] +func (f *WorkspaceStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WorkspaceStatus) Set(v string) error { + switch v { + case `BANNED`, `CANCELLING`, `FAILED`, `NOT_PROVISIONED`, `PROVISIONING`, `RUNNING`: + *f = WorkspaceStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "BANNED", "CANCELLING", "FAILED", "NOT_PROVISIONED", "PROVISIONING", "RUNNING"`, v) + } +} + +// Type always returns WorkspaceStatus to satisfy [pflag.Value] interface +func (f *WorkspaceStatus) Type() string { + return "WorkspaceStatus" +} diff --git a/service/serving_tf/model.go b/service/serving_tf/model.go new file mode 100755 index 0000000000..c516c84063 --- /dev/null +++ b/service/serving_tf/model.go @@ -0,0 +1,1670 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package serving_tf + +import ( + "fmt" + "io" + + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type Ai21LabsConfig struct { + // The Databricks secret key reference for an AI21Labs API key. + Ai21labsApiKey types.String `tfsdk:"ai21labs_api_key"` +} + +type AmazonBedrockConfig struct { + // The Databricks secret key reference for an AWS Access Key ID with + // permissions to interact with Bedrock services. + AwsAccessKeyId types.String `tfsdk:"aws_access_key_id"` + // The AWS region to use. Bedrock has to be enabled there. + AwsRegion types.String `tfsdk:"aws_region"` + // The Databricks secret key reference for an AWS Secret Access Key paired + // with the access key ID, with permissions to interact with Bedrock + // services. + AwsSecretAccessKey types.String `tfsdk:"aws_secret_access_key"` + // The underlying provider in Amazon Bedrock. Supported values (case + // insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. + BedrockProvider AmazonBedrockConfigBedrockProvider `tfsdk:"bedrock_provider"` +} + +// The underlying provider in Amazon Bedrock. Supported values (case +// insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. 
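+//
+// Illustrative sketch, not part of the generated output: an AmazonBedrockConfig
+// is typically populated with one of the constants below. The secret scope,
+// key names, and region are placeholders, and types.StringValue assumes
+// terraform-plugin-framework v1.
+//
+//	cfg := AmazonBedrockConfig{
+//		AwsRegion:          types.StringValue("us-east-1"),
+//		AwsAccessKeyId:     types.StringValue("{{secrets/my_scope/aws_access_key_id}}"),
+//		AwsSecretAccessKey: types.StringValue("{{secrets/my_scope/aws_secret_access_key}}"),
+//		BedrockProvider:    AmazonBedrockConfigBedrockProviderAnthropic,
+//	}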
+type AmazonBedrockConfigBedrockProvider string + +const AmazonBedrockConfigBedrockProviderAi21labs AmazonBedrockConfigBedrockProvider = `ai21labs` + +const AmazonBedrockConfigBedrockProviderAmazon AmazonBedrockConfigBedrockProvider = `amazon` + +const AmazonBedrockConfigBedrockProviderAnthropic AmazonBedrockConfigBedrockProvider = `anthropic` + +const AmazonBedrockConfigBedrockProviderCohere AmazonBedrockConfigBedrockProvider = `cohere` + +// String representation for [fmt.Print] +func (f *AmazonBedrockConfigBedrockProvider) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AmazonBedrockConfigBedrockProvider) Set(v string) error { + switch v { + case `ai21labs`, `amazon`, `anthropic`, `cohere`: + *f = AmazonBedrockConfigBedrockProvider(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ai21labs", "amazon", "anthropic", "cohere"`, v) + } +} + +// Type always returns AmazonBedrockConfigBedrockProvider to satisfy [pflag.Value] interface +func (f *AmazonBedrockConfigBedrockProvider) Type() string { + return "AmazonBedrockConfigBedrockProvider" +} + +type AnthropicConfig struct { + // The Databricks secret key reference for an Anthropic API key. + AnthropicApiKey types.String `tfsdk:"anthropic_api_key"` +} + +type App struct { + // The active deployment of the app. + ActiveDeployment *AppDeployment `tfsdk:"active_deployment"` + // The creation time of the app. Formatted timestamp in ISO 6801. + CreateTime types.String `tfsdk:"create_time"` + // The email of the user that created the app. + Creator types.String `tfsdk:"creator"` + // The description of the app. + Description types.String `tfsdk:"description"` + // The name of the app. The name must contain only lowercase alphanumeric + // characters and hyphens. It must be unique within the workspace. + Name types.String `tfsdk:"name"` + // The pending deployment of the app. + PendingDeployment *AppDeployment `tfsdk:"pending_deployment"` + + ServicePrincipalId types.Int64 `tfsdk:"service_principal_id"` + + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + + Status *AppStatus `tfsdk:"status"` + // The update time of the app. Formatted timestamp in ISO 6801. + UpdateTime types.String `tfsdk:"update_time"` + // The email of the user that last updated the app. + Updater types.String `tfsdk:"updater"` + // The URL of the app once it is deployed. + Url types.String `tfsdk:"url"` +} + +type AppDeployment struct { + // The creation time of the deployment. Formatted timestamp in ISO 6801. + CreateTime types.String `tfsdk:"create_time"` + // The email of the user creates the deployment. + Creator types.String `tfsdk:"creator"` + // The deployment artifacts for an app. + DeploymentArtifacts *AppDeploymentArtifacts `tfsdk:"deployment_artifacts"` + // The unique id of the deployment. + DeploymentId types.String `tfsdk:"deployment_id"` + // The mode of which the deployment will manage the source code. + Mode AppDeploymentMode `tfsdk:"mode"` + // The workspace file system path of the source code used to create the app + // deployment. This is different from + // `deployment_artifacts.source_code_path`, which is the path used by the + // deployed app. The former refers to the original source code location of + // the app in the workspace during deployment creation, whereas the latter + // provides a system generated stable snapshotted source code path used by + // the deployment. 
+ SourceCodePath types.String `tfsdk:"source_code_path"` + // Status and status message of the deployment + Status *AppDeploymentStatus `tfsdk:"status"` + // The update time of the deployment. Formatted timestamp in ISO 6801. + UpdateTime types.String `tfsdk:"update_time"` +} + +type AppDeploymentArtifacts struct { + // The snapshotted workspace file system path of the source code loaded by + // the deployed app. + SourceCodePath types.String `tfsdk:"source_code_path"` +} + +type AppDeploymentMode string + +const AppDeploymentModeAutoSync AppDeploymentMode = `AUTO_SYNC` + +const AppDeploymentModeModeUnspecified AppDeploymentMode = `MODE_UNSPECIFIED` + +const AppDeploymentModeSnapshot AppDeploymentMode = `SNAPSHOT` + +// String representation for [fmt.Print] +func (f *AppDeploymentMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppDeploymentMode) Set(v string) error { + switch v { + case `AUTO_SYNC`, `MODE_UNSPECIFIED`, `SNAPSHOT`: + *f = AppDeploymentMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AUTO_SYNC", "MODE_UNSPECIFIED", "SNAPSHOT"`, v) + } +} + +// Type always returns AppDeploymentMode to satisfy [pflag.Value] interface +func (f *AppDeploymentMode) Type() string { + return "AppDeploymentMode" +} + +type AppDeploymentState string + +const AppDeploymentStateFailed AppDeploymentState = `FAILED` + +const AppDeploymentStateInProgress AppDeploymentState = `IN_PROGRESS` + +const AppDeploymentStateStateUnspecified AppDeploymentState = `STATE_UNSPECIFIED` + +const AppDeploymentStateStopped AppDeploymentState = `STOPPED` + +const AppDeploymentStateSucceeded AppDeploymentState = `SUCCEEDED` + +// String representation for [fmt.Print] +func (f *AppDeploymentState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppDeploymentState) Set(v string) error { + switch v { + case `FAILED`, `IN_PROGRESS`, `STATE_UNSPECIFIED`, `STOPPED`, `SUCCEEDED`: + *f = AppDeploymentState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILED", "IN_PROGRESS", "STATE_UNSPECIFIED", "STOPPED", "SUCCEEDED"`, v) + } +} + +// Type always returns AppDeploymentState to satisfy [pflag.Value] interface +func (f *AppDeploymentState) Type() string { + return "AppDeploymentState" +} + +type AppDeploymentStatus struct { + // Message corresponding with the deployment state. + Message types.String `tfsdk:"message"` + // State of the deployment. 
+ State AppDeploymentState `tfsdk:"state"` +} + +type AppEnvironment struct { + Env []EnvVariable `tfsdk:"env"` +} + +type AppState string + +const AppStateCreating AppState = `CREATING` + +const AppStateDeleted AppState = `DELETED` + +const AppStateDeleting AppState = `DELETING` + +const AppStateError AppState = `ERROR` + +const AppStateIdle AppState = `IDLE` + +const AppStateRunning AppState = `RUNNING` + +const AppStateStarting AppState = `STARTING` + +const AppStateStateUnspecified AppState = `STATE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *AppState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AppState) Set(v string) error { + switch v { + case `CREATING`, `DELETED`, `DELETING`, `ERROR`, `IDLE`, `RUNNING`, `STARTING`, `STATE_UNSPECIFIED`: + *f = AppState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CREATING", "DELETED", "DELETING", "ERROR", "IDLE", "RUNNING", "STARTING", "STATE_UNSPECIFIED"`, v) + } +} + +// Type always returns AppState to satisfy [pflag.Value] interface +func (f *AppState) Type() string { + return "AppState" +} + +type AppStatus struct { + // Message corresponding with the app state. + Message types.String `tfsdk:"message"` + // State of the app. + State AppState `tfsdk:"state"` +} + +type AutoCaptureConfigInput struct { + // The name of the catalog in Unity Catalog. NOTE: On update, you cannot + // change the catalog name if the inference table is already enabled. + CatalogName types.String `tfsdk:"catalog_name"` + // Indicates whether the inference table is enabled. + Enabled types.Bool `tfsdk:"enabled"` + // The name of the schema in Unity Catalog. NOTE: On update, you cannot + // change the schema name if the inference table is already enabled. + SchemaName types.String `tfsdk:"schema_name"` + // The prefix of the table in Unity Catalog. NOTE: On update, you cannot + // change the prefix name if the inference table is already enabled. + TableNamePrefix types.String `tfsdk:"table_name_prefix"` +} + +type AutoCaptureConfigOutput struct { + // The name of the catalog in Unity Catalog. + CatalogName types.String `tfsdk:"catalog_name"` + // Indicates whether the inference table is enabled. + Enabled types.Bool `tfsdk:"enabled"` + // The name of the schema in Unity Catalog. + SchemaName types.String `tfsdk:"schema_name"` + + State *AutoCaptureState `tfsdk:"state"` + // The prefix of the table in Unity Catalog. + TableNamePrefix types.String `tfsdk:"table_name_prefix"` +} + +type AutoCaptureState struct { + PayloadTable *PayloadTable `tfsdk:"payload_table"` +} + +// Get build logs for a served model +type BuildLogsRequest struct { + // The name of the serving endpoint that the served model belongs to. This + // field is required. + Name types.String `tfsdk:"-" url:"-"` + // The name of the served model that build logs will be retrieved for. This + // field is required. + ServedModelName types.String `tfsdk:"-" url:"-"` +} + +type BuildLogsResponse struct { + // The logs associated with building the served entity's environment. + Logs types.String `tfsdk:"logs"` +} + +type ChatMessage struct { + // The content of the message. + Content types.String `tfsdk:"content"` + // The role of the message. One of [system, user, assistant]. + Role ChatMessageRole `tfsdk:"role"` +} + +// The role of the message. One of [system, user, assistant]. 
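+//
+// Illustrative sketch, not part of the generated output: chat-style queries
+// pass a slice of ChatMessage values, e.g. as QueryEndpointInput.Messages
+// (assuming terraform-plugin-framework v1's types.StringValue; the prompt
+// text is a placeholder).
+//
+//	msgs := []ChatMessage{
+//		{Role: ChatMessageRoleSystem, Content: types.StringValue("You are a helpful assistant.")},
+//		{Role: ChatMessageRoleUser, Content: types.StringValue("Summarize this document.")},
+//	}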
+type ChatMessageRole string + +const ChatMessageRoleAssistant ChatMessageRole = `assistant` + +const ChatMessageRoleSystem ChatMessageRole = `system` + +const ChatMessageRoleUser ChatMessageRole = `user` + +// String representation for [fmt.Print] +func (f *ChatMessageRole) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ChatMessageRole) Set(v string) error { + switch v { + case `assistant`, `system`, `user`: + *f = ChatMessageRole(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "assistant", "system", "user"`, v) + } +} + +// Type always returns ChatMessageRole to satisfy [pflag.Value] interface +func (f *ChatMessageRole) Type() string { + return "ChatMessageRole" +} + +type CohereConfig struct { + // The Databricks secret key reference for a Cohere API key. + CohereApiKey types.String `tfsdk:"cohere_api_key"` +} + +type CreateAppDeploymentRequest struct { + // The name of the app. + AppName types.String `tfsdk:"-" url:"-"` + // The mode of which the deployment will manage the source code. + Mode AppDeploymentMode `tfsdk:"mode"` + // The workspace file system path of the source code used to create the app + // deployment. This is different from + // `deployment_artifacts.source_code_path`, which is the path used by the + // deployed app. The former refers to the original source code location of + // the app in the workspace during deployment creation, whereas the latter + // provides a system generated stable snapshotted source code path used by + // the deployment. + SourceCodePath types.String `tfsdk:"source_code_path"` +} + +type CreateAppRequest struct { + // The description of the app. + Description types.String `tfsdk:"description"` + // The name of the app. The name must contain only lowercase alphanumeric + // characters and hyphens. It must be unique within the workspace. + Name types.String `tfsdk:"name"` +} + +type CreateServingEndpoint struct { + // The core config of the serving endpoint. + Config EndpointCoreConfigInput `tfsdk:"config"` + // The name of the serving endpoint. This field is required and must be + // unique across a Databricks workspace. An endpoint name can consist of + // alphanumeric characters, dashes, and underscores. + Name types.String `tfsdk:"name"` + // Rate limits to be applied to the serving endpoint. NOTE: only external + // and foundation model endpoints are supported as of now. + RateLimits []RateLimit `tfsdk:"rate_limits"` + // Enable route optimization for the serving endpoint. + RouteOptimized types.Bool `tfsdk:"route_optimized"` + // Tags to be attached to the serving endpoint and automatically propagated + // to billing logs. + Tags []EndpointTag `tfsdk:"tags"` +} + +type DatabricksModelServingConfig struct { + // The Databricks secret key reference for a Databricks API token that + // corresponds to a user or service principal with Can Query access to the + // model serving endpoint pointed to by this external model. + DatabricksApiToken types.String `tfsdk:"databricks_api_token"` + // The URL of the Databricks workspace containing the model serving endpoint + // pointed to by this external model. + DatabricksWorkspaceUrl types.String `tfsdk:"databricks_workspace_url"` +} + +type DataframeSplitInput struct { + Columns []any `tfsdk:"columns"` + + Data []any `tfsdk:"data"` + + Index []types.Int64 `tfsdk:"index"` +} + +// Delete an app +type DeleteAppRequest struct { + // The name of the app. 
+ Name types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete a serving endpoint +type DeleteServingEndpointRequest struct { + // The name of the serving endpoint. This field is required. + Name types.String `tfsdk:"-" url:"-"` +} + +type EmbeddingsV1ResponseEmbeddingElement struct { + Embedding []types.Float64 `tfsdk:"embedding"` + // The index of the embedding in the response. + Index types.Int64 `tfsdk:"index"` + // This will always be 'embedding'. + Object EmbeddingsV1ResponseEmbeddingElementObject `tfsdk:"object"` +} + +// This will always be 'embedding'. +type EmbeddingsV1ResponseEmbeddingElementObject string + +const EmbeddingsV1ResponseEmbeddingElementObjectEmbedding EmbeddingsV1ResponseEmbeddingElementObject = `embedding` + +// String representation for [fmt.Print] +func (f *EmbeddingsV1ResponseEmbeddingElementObject) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EmbeddingsV1ResponseEmbeddingElementObject) Set(v string) error { + switch v { + case `embedding`: + *f = EmbeddingsV1ResponseEmbeddingElementObject(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "embedding"`, v) + } +} + +// Type always returns EmbeddingsV1ResponseEmbeddingElementObject to satisfy [pflag.Value] interface +func (f *EmbeddingsV1ResponseEmbeddingElementObject) Type() string { + return "EmbeddingsV1ResponseEmbeddingElementObject" +} + +type EndpointCoreConfigInput struct { + // Configuration for Inference Tables which automatically logs requests and + // responses to Unity Catalog. + AutoCaptureConfig *AutoCaptureConfigInput `tfsdk:"auto_capture_config"` + // The name of the serving endpoint to update. This field is required. + Name types.String `tfsdk:"-" url:"-"` + // A list of served entities for the endpoint to serve. A serving endpoint + // can have up to 15 served entities. + ServedEntities []ServedEntityInput `tfsdk:"served_entities"` + // (Deprecated, use served_entities instead) A list of served models for the + // endpoint to serve. A serving endpoint can have up to 15 served models. + ServedModels []ServedModelInput `tfsdk:"served_models"` + // The traffic config defining how invocations to the serving endpoint + // should be routed. + TrafficConfig *TrafficConfig `tfsdk:"traffic_config"` +} + +type EndpointCoreConfigOutput struct { + // Configuration for Inference Tables which automatically logs requests and + // responses to Unity Catalog. + AutoCaptureConfig *AutoCaptureConfigOutput `tfsdk:"auto_capture_config"` + // The config version that the serving endpoint is currently serving. + ConfigVersion types.Int64 `tfsdk:"config_version"` + // The list of served entities under the serving endpoint config. + ServedEntities []ServedEntityOutput `tfsdk:"served_entities"` + // (Deprecated, use served_entities instead) The list of served models under + // the serving endpoint config. + ServedModels []ServedModelOutput `tfsdk:"served_models"` + // The traffic configuration associated with the serving endpoint config. + TrafficConfig *TrafficConfig `tfsdk:"traffic_config"` +} + +type EndpointCoreConfigSummary struct { + // The list of served entities under the serving endpoint config. + ServedEntities []ServedEntitySpec `tfsdk:"served_entities"` + // (Deprecated, use served_entities instead) The list of served models under + // the serving endpoint config. 
+ ServedModels []ServedModelSpec `tfsdk:"served_models"` +} + +type EndpointPendingConfig struct { + // Configuration for Inference Tables which automatically logs requests and + // responses to Unity Catalog. + AutoCaptureConfig *AutoCaptureConfigOutput `tfsdk:"auto_capture_config"` + // The config version that the serving endpoint is currently serving. + ConfigVersion types.Int64 `tfsdk:"config_version"` + // The list of served entities belonging to the last issued update to the + // serving endpoint. + ServedEntities []ServedEntityOutput `tfsdk:"served_entities"` + // (Deprecated, use served_entities instead) The list of served models + // belonging to the last issued update to the serving endpoint. + ServedModels []ServedModelOutput `tfsdk:"served_models"` + // The timestamp when the update to the pending config started. + StartTime types.Int64 `tfsdk:"start_time"` + // The traffic config defining how invocations to the serving endpoint + // should be routed. + TrafficConfig *TrafficConfig `tfsdk:"traffic_config"` +} + +type EndpointState struct { + // The state of an endpoint's config update. This informs the user if the + // pending_config is in progress, if the update failed, or if there is no + // update in progress. Note that if the endpoint's config_update state value + // is IN_PROGRESS, another update can not be made until the update completes + // or fails." + ConfigUpdate EndpointStateConfigUpdate `tfsdk:"config_update"` + // The state of an endpoint, indicating whether or not the endpoint is + // queryable. An endpoint is READY if all of the served entities in its + // active configuration are ready. If any of the actively served entities + // are in a non-ready state, the endpoint state will be NOT_READY. + Ready EndpointStateReady `tfsdk:"ready"` +} + +// The state of an endpoint's config update. This informs the user if the +// pending_config is in progress, if the update failed, or if there is no update +// in progress. Note that if the endpoint's config_update state value is +// IN_PROGRESS, another update can not be made until the update completes or +// fails." +type EndpointStateConfigUpdate string + +const EndpointStateConfigUpdateInProgress EndpointStateConfigUpdate = `IN_PROGRESS` + +const EndpointStateConfigUpdateNotUpdating EndpointStateConfigUpdate = `NOT_UPDATING` + +const EndpointStateConfigUpdateUpdateFailed EndpointStateConfigUpdate = `UPDATE_FAILED` + +// String representation for [fmt.Print] +func (f *EndpointStateConfigUpdate) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointStateConfigUpdate) Set(v string) error { + switch v { + case `IN_PROGRESS`, `NOT_UPDATING`, `UPDATE_FAILED`: + *f = EndpointStateConfigUpdate(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "IN_PROGRESS", "NOT_UPDATING", "UPDATE_FAILED"`, v) + } +} + +// Type always returns EndpointStateConfigUpdate to satisfy [pflag.Value] interface +func (f *EndpointStateConfigUpdate) Type() string { + return "EndpointStateConfigUpdate" +} + +// The state of an endpoint, indicating whether or not the endpoint is +// queryable. An endpoint is READY if all of the served entities in its active +// configuration are ready. If any of the actively served entities are in a +// non-ready state, the endpoint state will be NOT_READY. 
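+//
+// Illustrative sketch, not part of the generated output: for an EndpointState
+// value state (hypothetical), readiness is usually gated on both fields:
+//
+//	ready := state.Ready == EndpointStateReadyReady &&
+//		state.ConfigUpdate != EndpointStateConfigUpdateInProgress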
+type EndpointStateReady string + +const EndpointStateReadyNotReady EndpointStateReady = `NOT_READY` + +const EndpointStateReadyReady EndpointStateReady = `READY` + +// String representation for [fmt.Print] +func (f *EndpointStateReady) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointStateReady) Set(v string) error { + switch v { + case `NOT_READY`, `READY`: + *f = EndpointStateReady(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "NOT_READY", "READY"`, v) + } +} + +// Type always returns EndpointStateReady to satisfy [pflag.Value] interface +func (f *EndpointStateReady) Type() string { + return "EndpointStateReady" +} + +type EndpointTag struct { + // Key field for a serving endpoint tag. + Key types.String `tfsdk:"key"` + // Optional value field for a serving endpoint tag. + Value types.String `tfsdk:"value"` +} + +type EnvVariable struct { + Name types.String `tfsdk:"name"` + + Value types.String `tfsdk:"value"` + + ValueFrom types.String `tfsdk:"value_from"` +} + +// Get metrics of a serving endpoint +type ExportMetricsRequest struct { + // The name of the serving endpoint to retrieve metrics for. This field is + // required. + Name types.String `tfsdk:"-" url:"-"` +} + +type ExportMetricsResponse struct { + Contents io.ReadCloser `tfsdk:"-"` +} + +type ExternalModel struct { + // AI21Labs Config. Only required if the provider is 'ai21labs'. + Ai21labsConfig *Ai21LabsConfig `tfsdk:"ai21labs_config"` + // Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'. + AmazonBedrockConfig *AmazonBedrockConfig `tfsdk:"amazon_bedrock_config"` + // Anthropic Config. Only required if the provider is 'anthropic'. + AnthropicConfig *AnthropicConfig `tfsdk:"anthropic_config"` + // Cohere Config. Only required if the provider is 'cohere'. + CohereConfig *CohereConfig `tfsdk:"cohere_config"` + // Databricks Model Serving Config. Only required if the provider is + // 'databricks-model-serving'. + DatabricksModelServingConfig *DatabricksModelServingConfig `tfsdk:"databricks_model_serving_config"` + // The name of the external model. + Name types.String `tfsdk:"name"` + // OpenAI Config. Only required if the provider is 'openai'. + OpenaiConfig *OpenAiConfig `tfsdk:"openai_config"` + // PaLM Config. Only required if the provider is 'palm'. + PalmConfig *PaLmConfig `tfsdk:"palm_config"` + // The name of the provider for the external model. Currently, the supported + // providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', + // 'databricks-model-serving', 'openai', and 'palm'.", + Provider ExternalModelProvider `tfsdk:"provider"` + // The task type of the external model. + Task types.String `tfsdk:"task"` +} + +// The name of the provider for the external model. 
Currently, the supported +// providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', +// 'databricks-model-serving', 'openai', and 'palm'.", +type ExternalModelProvider string + +const ExternalModelProviderAi21labs ExternalModelProvider = `ai21labs` + +const ExternalModelProviderAmazonBedrock ExternalModelProvider = `amazon-bedrock` + +const ExternalModelProviderAnthropic ExternalModelProvider = `anthropic` + +const ExternalModelProviderCohere ExternalModelProvider = `cohere` + +const ExternalModelProviderDatabricksModelServing ExternalModelProvider = `databricks-model-serving` + +const ExternalModelProviderOpenai ExternalModelProvider = `openai` + +const ExternalModelProviderPalm ExternalModelProvider = `palm` + +// String representation for [fmt.Print] +func (f *ExternalModelProvider) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExternalModelProvider) Set(v string) error { + switch v { + case `ai21labs`, `amazon-bedrock`, `anthropic`, `cohere`, `databricks-model-serving`, `openai`, `palm`: + *f = ExternalModelProvider(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ai21labs", "amazon-bedrock", "anthropic", "cohere", "databricks-model-serving", "openai", "palm"`, v) + } +} + +// Type always returns ExternalModelProvider to satisfy [pflag.Value] interface +func (f *ExternalModelProvider) Type() string { + return "ExternalModelProvider" +} + +type ExternalModelUsageElement struct { + // The number of tokens in the chat/completions response. + CompletionTokens types.Int64 `tfsdk:"completion_tokens"` + // The number of tokens in the prompt. + PromptTokens types.Int64 `tfsdk:"prompt_tokens"` + // The total number of tokens in the prompt and response. + TotalTokens types.Int64 `tfsdk:"total_tokens"` +} + +type FoundationModel struct { + // The description of the foundation model. + Description types.String `tfsdk:"description"` + // The display name of the foundation model. + DisplayName types.String `tfsdk:"display_name"` + // The URL to the documentation of the foundation model. + Docs types.String `tfsdk:"docs"` + // The name of the foundation model. + Name types.String `tfsdk:"name"` +} + +// Get an app deployment +type GetAppDeploymentRequest struct { + // The name of the app. + AppName types.String `tfsdk:"-" url:"-"` + // The unique id of the deployment. + DeploymentId types.String `tfsdk:"-" url:"-"` +} + +// Get app environment +type GetAppEnvironmentRequest struct { + // The name of the app. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get an app +type GetAppRequest struct { + // The name of the app. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get the schema for a serving endpoint +type GetOpenApiRequest struct { + // The name of the serving endpoint that the served model belongs to. This + // field is required. + Name types.String `tfsdk:"-" url:"-"` +} + +// The response is an OpenAPI spec in JSON format that typically includes fields +// like openapi, info, servers and paths, etc. +type GetOpenApiResponse struct { +} + +// Get serving endpoint permission levels +type GetServingEndpointPermissionLevelsRequest struct { + // The serving endpoint for which to get or manage permissions. 
+ ServingEndpointId types.String `tfsdk:"-" url:"-"` +} + +type GetServingEndpointPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []ServingEndpointPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get serving endpoint permissions +type GetServingEndpointPermissionsRequest struct { + // The serving endpoint for which to get or manage permissions. + ServingEndpointId types.String `tfsdk:"-" url:"-"` +} + +// Get a single serving endpoint +type GetServingEndpointRequest struct { + // The name of the serving endpoint. This field is required. + Name types.String `tfsdk:"-" url:"-"` +} + +// List app deployments +type ListAppDeploymentsRequest struct { + // The name of the app. + AppName types.String `tfsdk:"-" url:"-"` + // Upper bound for items returned. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // Pagination token to go to the next page of apps. Requests first page if + // absent. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListAppDeploymentsResponse struct { + // Deployment history of the app. + AppDeployments []AppDeployment `tfsdk:"app_deployments"` + // Pagination token to request the next page of apps. + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List apps +type ListAppsRequest struct { + // Upper bound for items returned. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // Pagination token to go to the next page of apps. Requests first page if + // absent. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListAppsResponse struct { + Apps []App `tfsdk:"apps"` + // Pagination token to request the next page of apps. + NextPageToken types.String `tfsdk:"next_page_token"` +} + +type ListEndpointsResponse struct { + // The list of endpoints. + Endpoints []ServingEndpoint `tfsdk:"endpoints"` +} + +// Get the latest logs for a served model +type LogsRequest struct { + // The name of the serving endpoint that the served model belongs to. This + // field is required. + Name types.String `tfsdk:"-" url:"-"` + // The name of the served model that logs will be retrieved for. This field + // is required. + ServedModelName types.String `tfsdk:"-" url:"-"` +} + +type ModelDataPlaneInfo struct { + // Information required to query DataPlane API 'query' endpoint. + QueryInfo *oauth2.DataPlaneInfo `tfsdk:"query_info"` +} + +type OpenAiConfig struct { + // This field is only required for Azure AD OpenAI and is the Microsoft + // Entra Client ID. + MicrosoftEntraClientId types.String `tfsdk:"microsoft_entra_client_id"` + // The Databricks secret key reference for the Microsoft Entra Client Secret + // that is only required for Azure AD OpenAI. + MicrosoftEntraClientSecret types.String `tfsdk:"microsoft_entra_client_secret"` + // This field is only required for Azure AD OpenAI and is the Microsoft + // Entra Tenant ID. + MicrosoftEntraTenantId types.String `tfsdk:"microsoft_entra_tenant_id"` + // This is the base URL for the OpenAI API (default: + // "https://api.openai.com/v1"). For Azure OpenAI, this field is required, + // and is the base URL for the Azure OpenAI API service provided by Azure. + OpenaiApiBase types.String `tfsdk:"openai_api_base"` + // The Databricks secret key reference for an OpenAI or Azure OpenAI API + // key. + OpenaiApiKey types.String `tfsdk:"openai_api_key"` + // This is an optional field to specify the type of OpenAI API to use. 
For + // Azure OpenAI, this field is required, and adjust this parameter to + // represent the preferred security access validation protocol. For access + // token validation, use azure. For authentication using Azure Active + // Directory (Azure AD) use, azuread. + OpenaiApiType types.String `tfsdk:"openai_api_type"` + // This is an optional field to specify the OpenAI API version. For Azure + // OpenAI, this field is required, and is the version of the Azure OpenAI + // service to utilize, specified by a date. + OpenaiApiVersion types.String `tfsdk:"openai_api_version"` + // This field is only required for Azure OpenAI and is the name of the + // deployment resource for the Azure OpenAI service. + OpenaiDeploymentName types.String `tfsdk:"openai_deployment_name"` + // This is an optional field to specify the organization in OpenAI or Azure + // OpenAI. + OpenaiOrganization types.String `tfsdk:"openai_organization"` +} + +type PaLmConfig struct { + // The Databricks secret key reference for a PaLM API key. + PalmApiKey types.String `tfsdk:"palm_api_key"` +} + +type PatchServingEndpointTags struct { + // List of endpoint tags to add + AddTags []EndpointTag `tfsdk:"add_tags"` + // List of tag keys to delete + DeleteTags []types.String `tfsdk:"delete_tags"` + // The name of the serving endpoint who's tags to patch. This field is + // required. + Name types.String `tfsdk:"-" url:"-"` +} + +type PayloadTable struct { + // The name of the payload table. + Name types.String `tfsdk:"name"` + // The status of the payload table. + Status types.String `tfsdk:"status"` + // The status message of the payload table. + StatusMessage types.String `tfsdk:"status_message"` +} + +// Update rate limits of a serving endpoint +type PutRequest struct { + // The name of the serving endpoint whose rate limits are being updated. + // This field is required. + Name types.String `tfsdk:"-" url:"-"` + // The list of endpoint rate limits. + RateLimits []RateLimit `tfsdk:"rate_limits"` +} + +type PutResponse struct { + // The list of endpoint rate limits. + RateLimits []RateLimit `tfsdk:"rate_limits"` +} + +type QueryEndpointInput struct { + // Pandas Dataframe input in the records orientation. + DataframeRecords []any `tfsdk:"dataframe_records"` + // Pandas Dataframe input in the split orientation. + DataframeSplit *DataframeSplitInput `tfsdk:"dataframe_split"` + // The extra parameters field used ONLY for __completions, chat,__ and + // __embeddings external & foundation model__ serving endpoints. This is a + // map of strings and should only be used with other external/foundation + // model query fields. + ExtraParams map[string]types.String `tfsdk:"extra_params"` + // The input string (or array of strings) field used ONLY for __embeddings + // external & foundation model__ serving endpoints and is the only field + // (along with extra_params if needed) used by embeddings queries. + Input any `tfsdk:"input"` + // Tensor-based input in columnar format. + Inputs any `tfsdk:"inputs"` + // Tensor-based input in row format. + Instances []any `tfsdk:"instances"` + // The max tokens field used ONLY for __completions__ and __chat external & + // foundation model__ serving endpoints. This is an integer and should only + // be used with other chat/completions query fields. + MaxTokens types.Int64 `tfsdk:"max_tokens"` + // The messages field used ONLY for __chat external & foundation model__ + // serving endpoints. This is a map of strings and should only be used with + // other chat query fields. 
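+ // An illustrative sketch (not generated code) of a chat-style query input,
+ // assuming a hypothetical endpoint name and a `msgs` slice of ChatMessage
+ // built elsewhere:
+ //
+ //    in := QueryEndpointInput{
+ //        Name:        types.StringValue("my-chat-endpoint"),
+ //        Messages:    msgs,
+ //        MaxTokens:   types.Int64Value(256),
+ //        Temperature: types.Float64Value(0.7),
+ //    }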
+ Messages []ChatMessage `tfsdk:"messages"` + // The n (number of candidates) field used ONLY for __completions__ and + // __chat external & foundation model__ serving endpoints. This is an + // integer between 1 and 5 with a default of 1 and should only be used with + // other chat/completions query fields. + N types.Int64 `tfsdk:"n"` + // The name of the serving endpoint. This field is required. + Name types.String `tfsdk:"-" url:"-"` + // The prompt string (or array of strings) field used ONLY for __completions + // external & foundation model__ serving endpoints and should only be used + // with other completions query fields. + Prompt any `tfsdk:"prompt"` + // The stop sequences field used ONLY for __completions__ and __chat + // external & foundation model__ serving endpoints. This is a list of + // strings and should only be used with other chat/completions query fields. + Stop []types.String `tfsdk:"stop"` + // The stream field used ONLY for __completions__ and __chat external & + // foundation model__ serving endpoints. This is a boolean defaulting to + // false and should only be used with other chat/completions query fields. + Stream types.Bool `tfsdk:"stream"` + // The temperature field used ONLY for __completions__ and __chat external & + // foundation model__ serving endpoints. This is a float between 0.0 and 2.0 + // with a default of 1.0 and should only be used with other chat/completions + // query fields. + Temperature types.Float64 `tfsdk:"temperature"` +} + +type QueryEndpointResponse struct { + // The list of choices returned by the __chat or completions + // external/foundation model__ serving endpoint. + Choices []V1ResponseChoiceElement `tfsdk:"choices"` + // The timestamp in seconds when the query was created in Unix time returned + // by a __completions or chat external/foundation model__ serving endpoint. + Created types.Int64 `tfsdk:"created"` + // The list of the embeddings returned by the __embeddings + // external/foundation model__ serving endpoint. + Data []EmbeddingsV1ResponseEmbeddingElement `tfsdk:"data"` + // The ID of the query that may be returned by a __completions or chat + // external/foundation model__ serving endpoint. + Id types.String `tfsdk:"id"` + // The name of the __external/foundation model__ used for querying. This is + // the name of the model that was specified in the endpoint config. + Model types.String `tfsdk:"model"` + // The type of object returned by the __external/foundation model__ serving + // endpoint, one of [text_completion, chat.completion, list (of + // embeddings)]. + Object QueryEndpointResponseObject `tfsdk:"object"` + // The predictions returned by the serving endpoint. + Predictions []any `tfsdk:"predictions"` + // The name of the served model that served the request. This is useful when + // there are multiple models behind the same endpoint with traffic split. + ServedModelName types.String `tfsdk:"-" url:"-" header:"served-model-name,omitempty"` + // The usage object that may be returned by the __external/foundation + // model__ serving endpoint. This contains information about the number of + // tokens used in the prompt and response. + Usage *ExternalModelUsageElement `tfsdk:"usage"` +} + +// The type of object returned by the __external/foundation model__ serving +// endpoint, one of [text_completion, chat.completion, list (of embeddings)]. 
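+// An illustrative sketch (not generated code) of validating a raw string
+// against this enum with the generated Set method:
+//
+//    var obj QueryEndpointResponseObject
+//    if err := obj.Set("chat.completion"); err != nil {
+//        // the value was not one of the allowed constants
+//    }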
+type QueryEndpointResponseObject string + +const QueryEndpointResponseObjectChatCompletion QueryEndpointResponseObject = `chat.completion` + +const QueryEndpointResponseObjectList QueryEndpointResponseObject = `list` + +const QueryEndpointResponseObjectTextCompletion QueryEndpointResponseObject = `text_completion` + +// String representation for [fmt.Print] +func (f *QueryEndpointResponseObject) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *QueryEndpointResponseObject) Set(v string) error { + switch v { + case `chat.completion`, `list`, `text_completion`: + *f = QueryEndpointResponseObject(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "chat.completion", "list", "text_completion"`, v) + } +} + +// Type always returns QueryEndpointResponseObject to satisfy [pflag.Value] interface +func (f *QueryEndpointResponseObject) Type() string { + return "QueryEndpointResponseObject" +} + +type RateLimit struct { + // Used to specify how many calls are allowed for a key within the + // renewal_period. + Calls types.Int64 `tfsdk:"calls"` + // Key field for a serving endpoint rate limit. Currently, only 'user' and + // 'endpoint' are supported, with 'endpoint' being the default if not + // specified. + Key RateLimitKey `tfsdk:"key"` + // Renewal period field for a serving endpoint rate limit. Currently, only + // 'minute' is supported. + RenewalPeriod RateLimitRenewalPeriod `tfsdk:"renewal_period"` +} + +// Key field for a serving endpoint rate limit. Currently, only 'user' and +// 'endpoint' are supported, with 'endpoint' being the default if not specified. +type RateLimitKey string + +const RateLimitKeyEndpoint RateLimitKey = `endpoint` + +const RateLimitKeyUser RateLimitKey = `user` + +// String representation for [fmt.Print] +func (f *RateLimitKey) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RateLimitKey) Set(v string) error { + switch v { + case `endpoint`, `user`: + *f = RateLimitKey(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "endpoint", "user"`, v) + } +} + +// Type always returns RateLimitKey to satisfy [pflag.Value] interface +func (f *RateLimitKey) Type() string { + return "RateLimitKey" +} + +// Renewal period field for a serving endpoint rate limit. Currently, only +// 'minute' is supported. +type RateLimitRenewalPeriod string + +const RateLimitRenewalPeriodMinute RateLimitRenewalPeriod = `minute` + +// String representation for [fmt.Print] +func (f *RateLimitRenewalPeriod) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RateLimitRenewalPeriod) Set(v string) error { + switch v { + case `minute`: + *f = RateLimitRenewalPeriod(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "minute"`, v) + } +} + +// Type always returns RateLimitRenewalPeriod to satisfy [pflag.Value] interface +func (f *RateLimitRenewalPeriod) Type() string { + return "RateLimitRenewalPeriod" +} + +type Route struct { + // The name of the served model this route configures traffic for. + ServedModelName types.String `tfsdk:"served_model_name"` + // The percentage of endpoint traffic to send to this route. It must be an + // integer between 0 and 100 inclusive. + TrafficPercentage types.Int64 `tfsdk:"traffic_percentage"` +} + +type ServedEntityInput struct { + // The name of the entity to be served. 
The entity may be a model in the + // Databricks Model Registry, a model in the Unity Catalog (UC), or a + // function of type FEATURE_SPEC in the UC. If it is a UC object, the full + // name of the object should be given in the form of + // __catalog_name__.__schema_name__.__model_name__. + EntityName types.String `tfsdk:"entity_name"` + // The version of the model in Databricks Model Registry to be served or + // empty if the entity is a FEATURE_SPEC. + EntityVersion types.String `tfsdk:"entity_version"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs used for serving this entity. Note: this is an + // experimental feature and subject to change. Example entity environment + // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": + // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": + // "{{secrets/my_scope2/my_key2}}"}` + EnvironmentVars map[string]types.String `tfsdk:"environment_vars"` + // The external model to be served. NOTE: Only one of external_model and + // (entity_name, entity_version, workload_size, workload_type, and + // scale_to_zero_enabled) can be specified with the latter set being used + // for custom model serving for a Databricks registered model. When an + // external_model is present, the served entities list can only have one + // served_entity object. For an existing endpoint with external_model, it + // can not be updated to an endpoint without external_model. If the endpoint + // is created without external_model, users cannot update it to add + // external_model later. + ExternalModel *ExternalModel `tfsdk:"external_model"` + // ARN of the instance profile that the served entity uses to access AWS + // resources. + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // The maximum tokens per second that the endpoint can scale up to. + MaxProvisionedThroughput types.Int64 `tfsdk:"max_provisioned_throughput"` + // The minimum tokens per second that the endpoint can scale down to. + MinProvisionedThroughput types.Int64 `tfsdk:"min_provisioned_throughput"` + // The name of a served entity. It must be unique across an endpoint. A + // served entity name can consist of alphanumeric characters, dashes, and + // underscores. If not specified for an external model, this field defaults + // to external_model.name, with '.' and ':' replaced with '-', and if not + // specified for other entities, it defaults to + // -. + Name types.String `tfsdk:"name"` + // Whether the compute resources for the served entity should scale down to + // zero. + ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled"` + // The workload size of the served entity. The workload size corresponds to + // a range of provisioned concurrency that the compute autoscales between. A + // single unit of provisioned concurrency can process one request at a time. + // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 + // provisioned concurrency). If scale-to-zero is enabled, the lower bound of + // the provisioned concurrency for each workload size is 0. + WorkloadSize types.String `tfsdk:"workload_size"` + // The workload type of the served entity. The workload type selects which + // type of compute to use in the endpoint. The default value for this + // parameter is "CPU". For deep learning workloads, GPU acceleration is + // available by selecting workload types like GPU_SMALL and others. See the + // available [GPU types]. 
+ // + // [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType types.String `tfsdk:"workload_type"` +} + +type ServedEntityOutput struct { + // The creation timestamp of the served entity in Unix time. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // The email of the user who created the served entity. + Creator types.String `tfsdk:"creator"` + // The name of the entity served. The entity may be a model in the + // Databricks Model Registry, a model in the Unity Catalog (UC), or a + // function of type FEATURE_SPEC in the UC. If it is a UC object, the full + // name of the object is given in the form of + // __catalog_name__.__schema_name__.__model_name__. + EntityName types.String `tfsdk:"entity_name"` + // The version of the served entity in Databricks Model Registry or empty if + // the entity is a FEATURE_SPEC. + EntityVersion types.String `tfsdk:"entity_version"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs used for serving this entity. Note: this is an + // experimental feature and subject to change. Example entity environment + // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": + // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": + // "{{secrets/my_scope2/my_key2}}"}` + EnvironmentVars map[string]types.String `tfsdk:"environment_vars"` + // The external model that is served. NOTE: Only one of external_model, + // foundation_model, and (entity_name, entity_version, workload_size, + // workload_type, and scale_to_zero_enabled) is returned based on the + // endpoint type. + ExternalModel *ExternalModel `tfsdk:"external_model"` + // The foundation model that is served. NOTE: Only one of foundation_model, + // external_model, and (entity_name, entity_version, workload_size, + // workload_type, and scale_to_zero_enabled) is returned based on the + // endpoint type. + FoundationModel *FoundationModel `tfsdk:"foundation_model"` + // ARN of the instance profile that the served entity uses to access AWS + // resources. + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // The maximum tokens per second that the endpoint can scale up to. + MaxProvisionedThroughput types.Int64 `tfsdk:"max_provisioned_throughput"` + // The minimum tokens per second that the endpoint can scale down to. + MinProvisionedThroughput types.Int64 `tfsdk:"min_provisioned_throughput"` + // The name of the served entity. + Name types.String `tfsdk:"name"` + // Whether the compute resources for the served entity should scale down to + // zero. + ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled"` + // Information corresponding to the state of the served entity. + State *ServedModelState `tfsdk:"state"` + // The workload size of the served entity. The workload size corresponds to + // a range of provisioned concurrency that the compute autoscales between. A + // single unit of provisioned concurrency can process one request at a time. + // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 + // provisioned concurrency). If scale-to-zero is enabled, the lower bound of + // the provisioned concurrency for each workload size will be 0. + WorkloadSize types.String `tfsdk:"workload_size"` + // The workload type of the served entity. The workload type selects which + // type of compute to use in the endpoint. 
The default value for this + // parameter is "CPU". For deep learning workloads, GPU acceleration is + // available by selecting workload types like GPU_SMALL and others. See the + // available [GPU types]. + // + // [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType types.String `tfsdk:"workload_type"` +} + +type ServedEntitySpec struct { + // The name of the entity served. The entity may be a model in the + // Databricks Model Registry, a model in the Unity Catalog (UC), or a + // function of type FEATURE_SPEC in the UC. If it is a UC object, the full + // name of the object is given in the form of + // __catalog_name__.__schema_name__.__model_name__. + EntityName types.String `tfsdk:"entity_name"` + // The version of the served entity in Databricks Model Registry or empty if + // the entity is a FEATURE_SPEC. + EntityVersion types.String `tfsdk:"entity_version"` + // The external model that is served. NOTE: Only one of external_model, + // foundation_model, and (entity_name, entity_version) is returned based on + // the endpoint type. + ExternalModel *ExternalModel `tfsdk:"external_model"` + // The foundation model that is served. NOTE: Only one of foundation_model, + // external_model, and (entity_name, entity_version) is returned based on + // the endpoint type. + FoundationModel *FoundationModel `tfsdk:"foundation_model"` + // The name of the served entity. + Name types.String `tfsdk:"name"` +} + +type ServedModelInput struct { + // An object containing a set of optional, user-specified environment + // variable key-value pairs used for serving this model. Note: this is an + // experimental feature and subject to change. Example model environment + // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": + // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": + // "{{secrets/my_scope2/my_key2}}"}` + EnvironmentVars map[string]types.String `tfsdk:"environment_vars"` + // ARN of the instance profile that the served model will use to access AWS + // resources. + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // The name of the model in Databricks Model Registry to be served or if the + // model resides in Unity Catalog, the full name of model, in the form of + // __catalog_name__.__schema_name__.__model_name__. + ModelName types.String `tfsdk:"model_name"` + // The version of the model in Databricks Model Registry or Unity Catalog to + // be served. + ModelVersion types.String `tfsdk:"model_version"` + // The name of a served model. It must be unique across an endpoint. If not + // specified, this field will default to -. A + // served model name can consist of alphanumeric characters, dashes, and + // underscores. + Name types.String `tfsdk:"name"` + // Whether the compute resources for the served model should scale down to + // zero. + ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled"` + // The workload size of the served model. The workload size corresponds to a + // range of provisioned concurrency that the compute will autoscale between. + // A single unit of provisioned concurrency can process one request at a + // time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 + // provisioned concurrency). If scale-to-zero is enabled, the lower bound of + // the provisioned concurrency for each workload size will be 0. 
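+ // An illustrative sketch (not generated code) of a small, scale-to-zero
+ // served model, assuming a hypothetical model name and version:
+ //
+ //    served := ServedModelInput{
+ //        ModelName:          types.StringValue("main.default.my_model"),
+ //        ModelVersion:       types.StringValue("1"),
+ //        WorkloadSize:       ServedModelInputWorkloadSizeSmall,
+ //        ScaleToZeroEnabled: types.BoolValue(true),
+ //    }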
+ WorkloadSize ServedModelInputWorkloadSize `tfsdk:"workload_size"` + // The workload type of the served model. The workload type selects which + // type of compute to use in the endpoint. The default value for this + // parameter is "CPU". For deep learning workloads, GPU acceleration is + // available by selecting workload types like GPU_SMALL and others. See the + // available [GPU types]. + // + // [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType ServedModelInputWorkloadType `tfsdk:"workload_type"` +} + +// The workload size of the served model. The workload size corresponds to a +// range of provisioned concurrency that the compute will autoscale between. A +// single unit of provisioned concurrency can process one request at a time. +// Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 +// - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). +// If scale-to-zero is enabled, the lower bound of the provisioned concurrency +// for each workload size will be 0. +type ServedModelInputWorkloadSize string + +const ServedModelInputWorkloadSizeLarge ServedModelInputWorkloadSize = `Large` + +const ServedModelInputWorkloadSizeMedium ServedModelInputWorkloadSize = `Medium` + +const ServedModelInputWorkloadSizeSmall ServedModelInputWorkloadSize = `Small` + +// String representation for [fmt.Print] +func (f *ServedModelInputWorkloadSize) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServedModelInputWorkloadSize) Set(v string) error { + switch v { + case `Large`, `Medium`, `Small`: + *f = ServedModelInputWorkloadSize(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Large", "Medium", "Small"`, v) + } +} + +// Type always returns ServedModelInputWorkloadSize to satisfy [pflag.Value] interface +func (f *ServedModelInputWorkloadSize) Type() string { + return "ServedModelInputWorkloadSize" +} + +// The workload type of the served model. The workload type selects which type +// of compute to use in the endpoint. The default value for this parameter is +// "CPU". For deep learning workloads, GPU acceleration is available by +// selecting workload types like GPU_SMALL and others. See the available [GPU +// types]. 
+// +// [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types +type ServedModelInputWorkloadType string + +const ServedModelInputWorkloadTypeCpu ServedModelInputWorkloadType = `CPU` + +const ServedModelInputWorkloadTypeGpuLarge ServedModelInputWorkloadType = `GPU_LARGE` + +const ServedModelInputWorkloadTypeGpuMedium ServedModelInputWorkloadType = `GPU_MEDIUM` + +const ServedModelInputWorkloadTypeGpuSmall ServedModelInputWorkloadType = `GPU_SMALL` + +const ServedModelInputWorkloadTypeMultigpuMedium ServedModelInputWorkloadType = `MULTIGPU_MEDIUM` + +// String representation for [fmt.Print] +func (f *ServedModelInputWorkloadType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServedModelInputWorkloadType) Set(v string) error { + switch v { + case `CPU`, `GPU_LARGE`, `GPU_MEDIUM`, `GPU_SMALL`, `MULTIGPU_MEDIUM`: + *f = ServedModelInputWorkloadType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CPU", "GPU_LARGE", "GPU_MEDIUM", "GPU_SMALL", "MULTIGPU_MEDIUM"`, v) + } +} + +// Type always returns ServedModelInputWorkloadType to satisfy [pflag.Value] interface +func (f *ServedModelInputWorkloadType) Type() string { + return "ServedModelInputWorkloadType" +} + +type ServedModelOutput struct { + // The creation timestamp of the served model in Unix time. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // The email of the user who created the served model. + Creator types.String `tfsdk:"creator"` + // An object containing a set of optional, user-specified environment + // variable key-value pairs used for serving this model. Note: this is an + // experimental feature and subject to change. Example model environment + // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": + // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": + // "{{secrets/my_scope2/my_key2}}"}` + EnvironmentVars map[string]types.String `tfsdk:"environment_vars"` + // ARN of the instance profile that the served model will use to access AWS + // resources. + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // The name of the model in Databricks Model Registry or the full name of + // the model in Unity Catalog. + ModelName types.String `tfsdk:"model_name"` + // The version of the model in Databricks Model Registry or Unity Catalog to + // be served. + ModelVersion types.String `tfsdk:"model_version"` + // The name of the served model. + Name types.String `tfsdk:"name"` + // Whether the compute resources for the Served Model should scale down to + // zero. + ScaleToZeroEnabled types.Bool `tfsdk:"scale_to_zero_enabled"` + // Information corresponding to the state of the Served Model. + State *ServedModelState `tfsdk:"state"` + // The workload size of the served model. The workload size corresponds to a + // range of provisioned concurrency that the compute will autoscale between. + // A single unit of provisioned concurrency can process one request at a + // time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 + // provisioned concurrency). If scale-to-zero is enabled, the lower bound of + // the provisioned concurrency for each workload size will be 0. + WorkloadSize types.String `tfsdk:"workload_size"` + // The workload type of the served model. The workload type selects which + // type of compute to use in the endpoint. 
The default value for this + // parameter is "CPU". For deep learning workloads, GPU acceleration is + // available by selecting workload types like GPU_SMALL and others. See the + // available [GPU types]. + // + // [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType types.String `tfsdk:"workload_type"` +} + +type ServedModelSpec struct { + // The name of the model in Databricks Model Registry or the full name of + // the model in Unity Catalog. + ModelName types.String `tfsdk:"model_name"` + // The version of the model in Databricks Model Registry or Unity Catalog to + // be served. + ModelVersion types.String `tfsdk:"model_version"` + // The name of the served model. + Name types.String `tfsdk:"name"` +} + +type ServedModelState struct { + // The state of the served entity deployment. DEPLOYMENT_CREATING indicates + // that the served entity is not ready yet because the deployment is still + // being created (i.e container image is building, model server is deploying + // for the first time, etc.). DEPLOYMENT_RECOVERING indicates that the + // served entity was previously in a ready state but no longer is and is + // attempting to recover. DEPLOYMENT_READY indicates that the served entity + // is ready to receive traffic. DEPLOYMENT_FAILED indicates that there was + // an error trying to bring up the served entity (e.g container image build + // failed, the model server failed to start due to a model loading error, + // etc.) DEPLOYMENT_ABORTED indicates that the deployment was terminated + // likely due to a failure in bringing up another served entity under the + // same endpoint and config version. + Deployment ServedModelStateDeployment `tfsdk:"deployment"` + // More information about the state of the served entity, if available. + DeploymentStateMessage types.String `tfsdk:"deployment_state_message"` +} + +// The state of the served entity deployment. DEPLOYMENT_CREATING indicates that +// the served entity is not ready yet because the deployment is still being +// created (i.e container image is building, model server is deploying for the +// first time, etc.). DEPLOYMENT_RECOVERING indicates that the served entity was +// previously in a ready state but no longer is and is attempting to recover. +// DEPLOYMENT_READY indicates that the served entity is ready to receive +// traffic. DEPLOYMENT_FAILED indicates that there was an error trying to bring +// up the served entity (e.g container image build failed, the model server +// failed to start due to a model loading error, etc.) DEPLOYMENT_ABORTED +// indicates that the deployment was terminated likely due to a failure in +// bringing up another served entity under the same endpoint and config version. 
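+// An illustrative sketch (not generated code) of reacting to the deployment
+// state, assuming `state` is a ServedModelState read from an endpoint
+// response:
+//
+//    if state.Deployment == ServedModelStateDeploymentFailed {
+//        msg := state.DeploymentStateMessage.ValueString()
+//        // surface msg to the user; the served entity did not come up
+//    }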
+type ServedModelStateDeployment string + +const ServedModelStateDeploymentAborted ServedModelStateDeployment = `DEPLOYMENT_ABORTED` + +const ServedModelStateDeploymentCreating ServedModelStateDeployment = `DEPLOYMENT_CREATING` + +const ServedModelStateDeploymentFailed ServedModelStateDeployment = `DEPLOYMENT_FAILED` + +const ServedModelStateDeploymentReady ServedModelStateDeployment = `DEPLOYMENT_READY` + +const ServedModelStateDeploymentRecovering ServedModelStateDeployment = `DEPLOYMENT_RECOVERING` + +// String representation for [fmt.Print] +func (f *ServedModelStateDeployment) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServedModelStateDeployment) Set(v string) error { + switch v { + case `DEPLOYMENT_ABORTED`, `DEPLOYMENT_CREATING`, `DEPLOYMENT_FAILED`, `DEPLOYMENT_READY`, `DEPLOYMENT_RECOVERING`: + *f = ServedModelStateDeployment(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DEPLOYMENT_ABORTED", "DEPLOYMENT_CREATING", "DEPLOYMENT_FAILED", "DEPLOYMENT_READY", "DEPLOYMENT_RECOVERING"`, v) + } +} + +// Type always returns ServedModelStateDeployment to satisfy [pflag.Value] interface +func (f *ServedModelStateDeployment) Type() string { + return "ServedModelStateDeployment" +} + +type ServerLogsResponse struct { + // The most recent log lines of the model server processing invocation + // requests. + Logs types.String `tfsdk:"logs"` +} + +type ServingEndpoint struct { + // The config that is currently being served by the endpoint. + Config *EndpointCoreConfigSummary `tfsdk:"config"` + // The timestamp when the endpoint was created in Unix time. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // The email of the user who created the serving endpoint. + Creator types.String `tfsdk:"creator"` + // System-generated ID of the endpoint. This is used to refer to the + // endpoint in the Permissions API + Id types.String `tfsdk:"id"` + // The timestamp when the endpoint was last updated by a user in Unix time. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // The name of the serving endpoint. + Name types.String `tfsdk:"name"` + // Information corresponding to the state of the serving endpoint. + State *EndpointState `tfsdk:"state"` + // Tags attached to the serving endpoint. + Tags []EndpointTag `tfsdk:"tags"` + // The task type of the serving endpoint. + Task types.String `tfsdk:"task"` +} + +type ServingEndpointAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel ServingEndpointPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ServingEndpointAccessControlResponse struct { + // All permissions. + AllPermissions []ServingEndpointPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type ServingEndpointDetailed struct { + // The config that is currently being served by the endpoint. 
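+ // An illustrative sketch (not generated code), assuming `ep` is a
+ // ServingEndpointDetailed returned by a get call: Config is what is live
+ // now, while a non-nil PendingConfig means an update is still rolling out.
+ //
+ //    if ep.PendingConfig != nil {
+ //        // an update is in progress; ep.Config still reflects the live config
+ //    }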
+ Config *EndpointCoreConfigOutput `tfsdk:"config"` + // The timestamp when the endpoint was created in Unix time. + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // The email of the user who created the serving endpoint. + Creator types.String `tfsdk:"creator"` + // Information required to query DataPlane APIs. + DataPlaneInfo *ModelDataPlaneInfo `tfsdk:"data_plane_info"` + // Endpoint invocation url if route optimization is enabled for endpoint + EndpointUrl types.String `tfsdk:"endpoint_url"` + // System-generated ID of the endpoint. This is used to refer to the + // endpoint in the Permissions API + Id types.String `tfsdk:"id"` + // The timestamp when the endpoint was last updated by a user in Unix time. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // The name of the serving endpoint. + Name types.String `tfsdk:"name"` + // The config that the endpoint is attempting to update to. + PendingConfig *EndpointPendingConfig `tfsdk:"pending_config"` + // The permission level of the principal making the request. + PermissionLevel ServingEndpointDetailedPermissionLevel `tfsdk:"permission_level"` + // Boolean representing if route optimization has been enabled for the + // endpoint + RouteOptimized types.Bool `tfsdk:"route_optimized"` + // Information corresponding to the state of the serving endpoint. + State *EndpointState `tfsdk:"state"` + // Tags attached to the serving endpoint. + Tags []EndpointTag `tfsdk:"tags"` + // The task type of the serving endpoint. + Task types.String `tfsdk:"task"` +} + +// The permission level of the principal making the request. +type ServingEndpointDetailedPermissionLevel string + +const ServingEndpointDetailedPermissionLevelCanManage ServingEndpointDetailedPermissionLevel = `CAN_MANAGE` + +const ServingEndpointDetailedPermissionLevelCanQuery ServingEndpointDetailedPermissionLevel = `CAN_QUERY` + +const ServingEndpointDetailedPermissionLevelCanView ServingEndpointDetailedPermissionLevel = `CAN_VIEW` + +// String representation for [fmt.Print] +func (f *ServingEndpointDetailedPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServingEndpointDetailedPermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`: + *f = ServingEndpointDetailedPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_QUERY", "CAN_VIEW"`, v) + } +} + +// Type always returns ServingEndpointDetailedPermissionLevel to satisfy [pflag.Value] interface +func (f *ServingEndpointDetailedPermissionLevel) Type() string { + return "ServingEndpointDetailedPermissionLevel" +} + +type ServingEndpointPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel ServingEndpointPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type ServingEndpointPermissionLevel string + +const ServingEndpointPermissionLevelCanManage ServingEndpointPermissionLevel = `CAN_MANAGE` + +const ServingEndpointPermissionLevelCanQuery ServingEndpointPermissionLevel = `CAN_QUERY` + +const ServingEndpointPermissionLevelCanView ServingEndpointPermissionLevel = `CAN_VIEW` + +// String representation for [fmt.Print] +func (f *ServingEndpointPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f 
*ServingEndpointPermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_QUERY`, `CAN_VIEW`: + *f = ServingEndpointPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_QUERY", "CAN_VIEW"`, v) + } +} + +// Type always returns ServingEndpointPermissionLevel to satisfy [pflag.Value] interface +func (f *ServingEndpointPermissionLevel) Type() string { + return "ServingEndpointPermissionLevel" +} + +type ServingEndpointPermissions struct { + AccessControlList []ServingEndpointAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type ServingEndpointPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel ServingEndpointPermissionLevel `tfsdk:"permission_level"` +} + +type ServingEndpointPermissionsRequest struct { + AccessControlList []ServingEndpointAccessControlRequest `tfsdk:"access_control_list"` + // The serving endpoint for which to get or manage permissions. + ServingEndpointId types.String `tfsdk:"-" url:"-"` +} + +type StartAppRequest struct { + // The name of the app. + Name types.String `tfsdk:"-" url:"-"` +} + +type StopAppRequest struct { + // The name of the app. + Name types.String `tfsdk:"-" url:"-"` +} + +type StopAppResponse struct { +} + +type TrafficConfig struct { + // The list of routes that define traffic to each served entity. + Routes []Route `tfsdk:"routes"` +} + +type UpdateAppRequest struct { + // The description of the app. + Description types.String `tfsdk:"description"` + // The name of the app. The name must contain only lowercase alphanumeric + // characters and hyphens. It must be unique within the workspace. + Name types.String `tfsdk:"name" url:"-"` +} + +type V1ResponseChoiceElement struct { + // The finish reason returned by the endpoint. + FinishReason types.String `tfsdk:"finishReason"` + // The index of the choice in the __chat or completions__ response. + Index types.Int64 `tfsdk:"index"` + // The logprobs returned only by the __completions__ endpoint. + Logprobs types.Int64 `tfsdk:"logprobs"` + // The message response from the __chat__ endpoint. + Message *ChatMessage `tfsdk:"message"` + // The text response from the __completions__ endpoint. + Text types.String `tfsdk:"text"` +} diff --git a/service/settings_tf/model.go b/service/settings_tf/model.go new file mode 100755 index 0000000000..bfdfec0abb --- /dev/null +++ b/service/settings_tf/model.go @@ -0,0 +1,1493 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package settings_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AutomaticClusterUpdateSetting struct { + AutomaticClusterUpdateWorkspace ClusterAutoRestartMessage `tfsdk:"automatic_cluster_update_workspace"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. 
This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name"` +} + +type ClusterAutoRestartMessage struct { + CanToggle types.Bool `tfsdk:"can_toggle"` + + Enabled types.Bool `tfsdk:"enabled"` + // Contains an information about the enablement status judging (e.g. whether + // the enterprise tier is enabled) This is only additional information that + // MUST NOT be used to decide whether the setting is enabled or not. This is + // intended to use only for purposes like showing an error message to the + // customer with the additional details. For example, using these details we + // can check why exactly the feature is disabled for this customer. + EnablementDetails *ClusterAutoRestartMessageEnablementDetails `tfsdk:"enablement_details"` + + MaintenanceWindow *ClusterAutoRestartMessageMaintenanceWindow `tfsdk:"maintenance_window"` + + RestartEvenIfNoUpdatesAvailable types.Bool `tfsdk:"restart_even_if_no_updates_available"` +} + +// Contains an information about the enablement status judging (e.g. whether the +// enterprise tier is enabled) This is only additional information that MUST NOT +// be used to decide whether the setting is enabled or not. This is intended to +// use only for purposes like showing an error message to the customer with the +// additional details. For example, using these details we can check why exactly +// the feature is disabled for this customer. 
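+// An illustrative sketch (not generated code), assuming `msg` is a
+// ClusterAutoRestartMessage read from state; the details are only for
+// explaining why the feature is unavailable, never for deciding enablement:
+//
+//    details := msg.EnablementDetails
+//    if !msg.Enabled.ValueBool() && details != nil && details.UnavailableForNonEnterpriseTier.ValueBool() {
+//        // explain that the enterprise tier is required; do not flip Enabled here
+//    }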
+type ClusterAutoRestartMessageEnablementDetails struct { + // The feature is force enabled if compliance mode is active + ForcedForComplianceMode types.Bool `tfsdk:"forced_for_compliance_mode"` + // The feature is unavailable if the corresponding entitlement disabled (see + // getShieldEntitlementEnable) + UnavailableForDisabledEntitlement types.Bool `tfsdk:"unavailable_for_disabled_entitlement"` + // The feature is unavailable if the customer doesn't have enterprise tier + UnavailableForNonEnterpriseTier types.Bool `tfsdk:"unavailable_for_non_enterprise_tier"` +} + +type ClusterAutoRestartMessageMaintenanceWindow struct { + WeekDayBasedSchedule *ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule `tfsdk:"week_day_based_schedule"` +} + +type ClusterAutoRestartMessageMaintenanceWindowDayOfWeek string + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekDayOfWeekUnspecified ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `DAY_OF_WEEK_UNSPECIFIED` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekFriday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `FRIDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekMonday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `MONDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekSaturday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `SATURDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekSunday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `SUNDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekThursday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `THURSDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekTuesday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `TUESDAY` + +const ClusterAutoRestartMessageMaintenanceWindowDayOfWeekWednesday ClusterAutoRestartMessageMaintenanceWindowDayOfWeek = `WEDNESDAY` + +// String representation for [fmt.Print] +func (f *ClusterAutoRestartMessageMaintenanceWindowDayOfWeek) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterAutoRestartMessageMaintenanceWindowDayOfWeek) Set(v string) error { + switch v { + case `DAY_OF_WEEK_UNSPECIFIED`, `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY`: + *f = ClusterAutoRestartMessageMaintenanceWindowDayOfWeek(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DAY_OF_WEEK_UNSPECIFIED", "FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"`, v) + } +} + +// Type always returns ClusterAutoRestartMessageMaintenanceWindowDayOfWeek to satisfy [pflag.Value] interface +func (f *ClusterAutoRestartMessageMaintenanceWindowDayOfWeek) Type() string { + return "ClusterAutoRestartMessageMaintenanceWindowDayOfWeek" +} + +type ClusterAutoRestartMessageMaintenanceWindowWeekDayBasedSchedule struct { + DayOfWeek ClusterAutoRestartMessageMaintenanceWindowDayOfWeek `tfsdk:"day_of_week"` + + Frequency ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency `tfsdk:"frequency"` + + WindowStartTime *ClusterAutoRestartMessageMaintenanceWindowWindowStartTime `tfsdk:"window_start_time"` +} + +type ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency string + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyEveryWeek ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `EVERY_WEEK` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyFirstAndThirdOfMonth 
ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `FIRST_AND_THIRD_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyFirstOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `FIRST_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyFourthOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `FOURTH_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencySecondAndFourthOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `SECOND_AND_FOURTH_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencySecondOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `SECOND_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyThirdOfMonth ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `THIRD_OF_MONTH` + +const ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequencyWeekDayFrequencyUnspecified ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency = `WEEK_DAY_FREQUENCY_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency) Set(v string) error { + switch v { + case `EVERY_WEEK`, `FIRST_AND_THIRD_OF_MONTH`, `FIRST_OF_MONTH`, `FOURTH_OF_MONTH`, `SECOND_AND_FOURTH_OF_MONTH`, `SECOND_OF_MONTH`, `THIRD_OF_MONTH`, `WEEK_DAY_FREQUENCY_UNSPECIFIED`: + *f = ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EVERY_WEEK", "FIRST_AND_THIRD_OF_MONTH", "FIRST_OF_MONTH", "FOURTH_OF_MONTH", "SECOND_AND_FOURTH_OF_MONTH", "SECOND_OF_MONTH", "THIRD_OF_MONTH", "WEEK_DAY_FREQUENCY_UNSPECIFIED"`, v) + } +} + +// Type always returns ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency to satisfy [pflag.Value] interface +func (f *ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency) Type() string { + return "ClusterAutoRestartMessageMaintenanceWindowWeekDayFrequency" +} + +type ClusterAutoRestartMessageMaintenanceWindowWindowStartTime struct { + Hours types.Int64 `tfsdk:"hours"` + + Minutes types.Int64 `tfsdk:"minutes"` +} + +// SHIELD feature: CSP +type ComplianceSecurityProfile struct { + // Set by customers when they request Compliance Security Profile (CSP) + ComplianceStandards []ComplianceStandard `tfsdk:"compliance_standards"` + + IsEnabled types.Bool `tfsdk:"is_enabled"` +} + +type ComplianceSecurityProfileSetting struct { + // SHIELD feature: CSP + ComplianceSecurityProfileWorkspace ComplianceSecurityProfile `tfsdk:"compliance_security_profile_workspace"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. 
The setting name in the path parameter will be respected instead.
+ // Setting name is required to be 'default' if the setting only has one
+ // instance per workspace.
+ SettingName types.String `tfsdk:"setting_name"`
+}
+
+// Compliance standard for SHIELD customers
+type ComplianceStandard string
+
+const ComplianceStandardComplianceStandardUnspecified ComplianceStandard = `COMPLIANCE_STANDARD_UNSPECIFIED`
+
+const ComplianceStandardCyberEssentialPlus ComplianceStandard = `CYBER_ESSENTIAL_PLUS`
+
+const ComplianceStandardFedrampHigh ComplianceStandard = `FEDRAMP_HIGH`
+
+const ComplianceStandardFedrampIl5 ComplianceStandard = `FEDRAMP_IL5`
+
+const ComplianceStandardFedrampModerate ComplianceStandard = `FEDRAMP_MODERATE`
+
+const ComplianceStandardHipaa ComplianceStandard = `HIPAA`
+
+const ComplianceStandardIrapProtected ComplianceStandard = `IRAP_PROTECTED`
+
+const ComplianceStandardItarEar ComplianceStandard = `ITAR_EAR`
+
+const ComplianceStandardNone ComplianceStandard = `NONE`
+
+const ComplianceStandardPciDss ComplianceStandard = `PCI_DSS`
+
+// String representation for [fmt.Print]
+func (f *ComplianceStandard) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *ComplianceStandard) Set(v string) error {
+ switch v {
+ case `COMPLIANCE_STANDARD_UNSPECIFIED`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `IRAP_PROTECTED`, `ITAR_EAR`, `NONE`, `PCI_DSS`:
+ *f = ComplianceStandard(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "COMPLIANCE_STANDARD_UNSPECIFIED", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "IRAP_PROTECTED", "ITAR_EAR", "NONE", "PCI_DSS"`, v)
+ }
+}
+
+// Type always returns ComplianceStandard to satisfy [pflag.Value] interface
+func (f *ComplianceStandard) Type() string {
+ return "ComplianceStandard"
+}
+
+// Details required to configure a block list or allow list.
+type CreateIpAccessList struct {
+ IpAddresses []types.String `tfsdk:"ip_addresses"`
+ // Label for the IP access list. This **cannot** be empty.
+ Label types.String `tfsdk:"label"`
+ // Type of IP access list. Valid values are as follows and are
+ // case-sensitive:
+ //
+ // * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block
+ // list. Exclude this IP or range. IP addresses in the block list are
+ // excluded even if they are included in an allow list.
+ ListType ListType `tfsdk:"list_type"`
+}
+
+// An IP access list was successfully created.
+type CreateIpAccessListResponse struct {
+ // Definition of an IP Access list
+ IpAccessList *IpAccessListInfo `tfsdk:"ip_access_list"`
+}
+
+type CreateNetworkConnectivityConfigRequest struct {
+ // The name of the network connectivity configuration. The name can contain
+ // alphanumeric characters, hyphens, and underscores. The length must be
+ // between 3 and 30 characters. The name must match the regular expression
+ // `^[0-9a-zA-Z-_]{3,30}$`.
+ Name types.String `tfsdk:"name"`
+ // The region for the network connectivity configuration. Only workspaces in
+ // the same region can be attached to the network connectivity
+ // configuration.
+ Region types.String `tfsdk:"region"`
+}
+
+// Configuration details for creating on-behalf tokens.
+type CreateOboTokenRequest struct {
+ // Application ID of the service principal.
+ ApplicationId types.String `tfsdk:"application_id"`
+ // Comment that describes the purpose of the token.
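+ // An illustrative sketch (not generated code) of an on-behalf token
+ // request, assuming a hypothetical service principal application ID:
+ //
+ //    req := CreateOboTokenRequest{
+ //        ApplicationId:   types.StringValue("<application-id>"),
+ //        Comment:         types.StringValue("token for CI jobs"),
+ //        LifetimeSeconds: types.Int64Value(3600),
+ //    }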
+ Comment types.String `tfsdk:"comment"`
+ // The number of seconds before the token expires.
+ LifetimeSeconds types.Int64 `tfsdk:"lifetime_seconds"`
+}
+
+// An on-behalf token was successfully created for the service principal.
+type CreateOboTokenResponse struct {
+ TokenInfo *TokenInfo `tfsdk:"token_info"`
+ // Value of the token.
+ TokenValue types.String `tfsdk:"token_value"`
+}
+
+type CreatePrivateEndpointRuleRequest struct {
+ // The sub-resource type (group ID) of the target resource. Note that to
+ // connect to workspace root storage (root DBFS), you need two endpoints,
+ // one for `blob` and one for `dfs`.
+ GroupId CreatePrivateEndpointRuleRequestGroupId `tfsdk:"group_id"`
+ // Your Network Connectivity Configuration ID.
+ NetworkConnectivityConfigId types.String `tfsdk:"-" url:"-"`
+ // The Azure resource ID of the target resource.
+ ResourceId types.String `tfsdk:"resource_id"`
+}
+
+// The sub-resource type (group ID) of the target resource. Note that to connect
+// to workspace root storage (root DBFS), you need two endpoints, one for `blob`
+// and one for `dfs`.
+type CreatePrivateEndpointRuleRequestGroupId string
+
+const CreatePrivateEndpointRuleRequestGroupIdBlob CreatePrivateEndpointRuleRequestGroupId = `blob`
+
+const CreatePrivateEndpointRuleRequestGroupIdDfs CreatePrivateEndpointRuleRequestGroupId = `dfs`
+
+const CreatePrivateEndpointRuleRequestGroupIdMysqlServer CreatePrivateEndpointRuleRequestGroupId = `mysqlServer`
+
+const CreatePrivateEndpointRuleRequestGroupIdSqlServer CreatePrivateEndpointRuleRequestGroupId = `sqlServer`
+
+// String representation for [fmt.Print]
+func (f *CreatePrivateEndpointRuleRequestGroupId) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *CreatePrivateEndpointRuleRequestGroupId) Set(v string) error {
+ switch v {
+ case `blob`, `dfs`, `mysqlServer`, `sqlServer`:
+ *f = CreatePrivateEndpointRuleRequestGroupId(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "blob", "dfs", "mysqlServer", "sqlServer"`, v)
+ }
+}
+
+// Type always returns CreatePrivateEndpointRuleRequestGroupId to satisfy [pflag.Value] interface
+func (f *CreatePrivateEndpointRuleRequestGroupId) Type() string {
+ return "CreatePrivateEndpointRuleRequestGroupId"
+}
+
+type CreateTokenRequest struct {
+ // Optional description to attach to the token.
+ Comment types.String `tfsdk:"comment"`
+ // The lifetime of the token, in seconds.
+ //
+ // If the lifetime is not specified, this token remains valid indefinitely.
+ LifetimeSeconds types.Int64 `tfsdk:"lifetime_seconds"`
+}
+
+type CreateTokenResponse struct {
+ // The information for the new token.
+ TokenInfo *PublicTokenInfo `tfsdk:"token_info"`
+ // The value of the new token.
+ TokenValue types.String `tfsdk:"token_value"`
+}
+
+// Account level policy for CSP
+type CspEnablementAccount struct {
+ // Set by customers when they request Compliance Security Profile (CSP).
+ // Invariants are enforced in Settings policy.
+ ComplianceStandards []ComplianceStandard `tfsdk:"compliance_standards"`
+ // Enforced = it cannot be overridden at workspace level.
+ IsEnforced types.Bool `tfsdk:"is_enforced"`
+}
+
+type CspEnablementAccountSetting struct {
+ // Account level policy for CSP
+ CspEnablementAccount CspEnablementAccount `tfsdk:"csp_enablement_account"`
+ // etag used for versioning. The response is at least as fresh as the eTag
+ // provided.
This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name"` +} + +// This represents the setting configuration for the default namespace in the +// Databricks workspace. Setting the default catalog for the workspace +// determines the catalog that is used when queries do not reference a fully +// qualified 3 level name. For example, if the default catalog is set to +// 'retail_prod' then a query 'SELECT * FROM myTable' would reference the object +// 'retail_prod.default.myTable' (the schema 'default' is always assumed). This +// setting requires a restart of clusters and SQL warehouses to take effect. +// Additionally, the default namespace only applies when using Unity +// Catalog-enabled compute. +type DefaultNamespaceSetting struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag"` + + Namespace StringMessage `tfsdk:"namespace"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name"` +} + +// Delete access list +type DeleteAccountIpAccessListRequest struct { + // The ID for the corresponding IP access list + IpAccessListId types.String `tfsdk:"-" url:"-"` +} + +// Delete the default namespace setting +type DeleteDefaultNamespaceSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// The etag is returned. +type DeleteDefaultNamespaceSettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. 
This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag"` +} + +// Delete access list +type DeleteIpAccessListRequest struct { + // The ID for the corresponding IP access list + IpAccessListId types.String `tfsdk:"-" url:"-"` +} + +// Delete a network connectivity configuration +type DeleteNetworkConnectivityConfigurationRequest struct { + // Your Network Connectvity Configuration ID. + NetworkConnectivityConfigId types.String `tfsdk:"-" url:"-"` +} + +type DeleteNetworkConnectivityConfigurationResponse struct { +} + +// Delete Personal Compute setting +type DeletePersonalComputeSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// The etag is returned. +type DeletePersonalComputeSettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag"` +} + +// Delete a private endpoint rule +type DeletePrivateEndpointRuleRequest struct { + // Your Network Connectvity Configuration ID. + NetworkConnectivityConfigId types.String `tfsdk:"-" url:"-"` + // Your private endpoint rule ID. + PrivateEndpointRuleId types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete the restrict workspace admins setting +type DeleteRestrictWorkspaceAdminsSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// The etag is returned. +type DeleteRestrictWorkspaceAdminsSettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. 
This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag"` +} + +// Delete a token +type DeleteTokenManagementRequest struct { + // The ID of the token to get. + TokenId types.String `tfsdk:"-" url:"-"` +} + +// SHIELD feature: ESM +type EnhancedSecurityMonitoring struct { + IsEnabled types.Bool `tfsdk:"is_enabled"` +} + +type EnhancedSecurityMonitoringSetting struct { + // SHIELD feature: ESM + EnhancedSecurityMonitoringWorkspace EnhancedSecurityMonitoring `tfsdk:"enhanced_security_monitoring_workspace"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name"` +} + +// Account level policy for ESM +type EsmEnablementAccount struct { + IsEnforced types.Bool `tfsdk:"is_enforced"` +} + +type EsmEnablementAccountSetting struct { + // Account level policy for ESM + EsmEnablementAccount EsmEnablementAccount `tfsdk:"esm_enablement_account"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name"` +} + +// The exchange token is the result of the token exchange with the IdP +type ExchangeToken struct { + // The requested token. + Credential types.String `tfsdk:"credential"` + // The end-of-life timestamp of the token. The value is in milliseconds + // since the Unix epoch. + CredentialEolTime types.Int64 `tfsdk:"credentialEolTime"` + // User ID of the user that owns this token. + OwnerId types.Int64 `tfsdk:"ownerId"` + // The scopes of access granted in the token. 
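+	// Each element is a Terraform plugin framework types.String rather than a
+	// plain Go string, matching the tfsdk types used throughout these generated
+	// models (e.g. a value built with types.StringValue("some-scope"); the
+	// scope name here is purely illustrative).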
+ Scopes []types.String `tfsdk:"scopes"` + // The type of this exchange token + TokenType TokenType `tfsdk:"tokenType"` +} + +// Exchange a token with the IdP +type ExchangeTokenRequest struct { + // The partition of Credentials store + PartitionId PartitionId `tfsdk:"partitionId"` + // Array of scopes for the token request. + Scopes []types.String `tfsdk:"scopes"` + // A list of token types being requested + TokenType []TokenType `tfsdk:"tokenType"` +} + +// Exhanged tokens were successfully returned. +type ExchangeTokenResponse struct { + Values []ExchangeToken `tfsdk:"values"` +} + +// An IP access list was successfully returned. +type FetchIpAccessListResponse struct { + // Definition of an IP Access list + IpAccessList *IpAccessListInfo `tfsdk:"ip_access_list"` +} + +// Get IP access list +type GetAccountIpAccessListRequest struct { + // The ID for the corresponding IP access list + IpAccessListId types.String `tfsdk:"-" url:"-"` +} + +// Get the automatic cluster update setting +type GetAutomaticClusterUpdateSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// Get the compliance security profile setting +type GetComplianceSecurityProfileSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// Get the compliance security profile setting for new workspaces +type GetCspEnablementAccountSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// Get the default namespace setting +type GetDefaultNamespaceSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. 
That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// Get the enhanced security monitoring setting +type GetEnhancedSecurityMonitoringSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// Get the enhanced security monitoring setting for new workspaces +type GetEsmEnablementAccountSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// Get access list +type GetIpAccessListRequest struct { + // The ID for the corresponding IP access list + IpAccessListId types.String `tfsdk:"-" url:"-"` +} + +type GetIpAccessListResponse struct { + // Definition of an IP Access list + IpAccessList *IpAccessListInfo `tfsdk:"ip_access_list"` +} + +// IP access lists were successfully returned. +type GetIpAccessListsResponse struct { + IpAccessLists []IpAccessListInfo `tfsdk:"ip_access_lists"` +} + +// Get a network connectivity configuration +type GetNetworkConnectivityConfigurationRequest struct { + // Your Network Connectvity Configuration ID. + NetworkConnectivityConfigId types.String `tfsdk:"-" url:"-"` +} + +// Get Personal Compute setting +type GetPersonalComputeSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// Get a private endpoint rule +type GetPrivateEndpointRuleRequest struct { + // Your Network Connectvity Configuration ID. + NetworkConnectivityConfigId types.String `tfsdk:"-" url:"-"` + // Your private endpoint rule ID. + PrivateEndpointRuleId types.String `tfsdk:"-" url:"-"` +} + +// Get the restrict workspace admins setting +type GetRestrictWorkspaceAdminsSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-" url:"etag,omitempty"` +} + +// Check configuration status +type GetStatusRequest struct { + Keys types.String `tfsdk:"-" url:"keys"` +} + +// Get token info +type GetTokenManagementRequest struct { + // The ID of the token to get. + TokenId types.String `tfsdk:"-" url:"-"` +} + +type GetTokenPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []TokenPermissionsDescription `tfsdk:"permission_levels"` +} + +// Token with specified Token ID was successfully returned. +type GetTokenResponse struct { + TokenInfo *TokenInfo `tfsdk:"token_info"` +} + +// Definition of an IP Access list +type IpAccessListInfo struct { + // Total number of IP or CIDR values. + AddressCount types.Int64 `tfsdk:"address_count"` + // Creation timestamp in milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // User ID of the user who created this list. + CreatedBy types.Int64 `tfsdk:"created_by"` + // Specifies whether this IP access list is enabled. + Enabled types.Bool `tfsdk:"enabled"` + + IpAddresses []types.String `tfsdk:"ip_addresses"` + // Label for the IP access list. This **cannot** be empty. + Label types.String `tfsdk:"label"` + // Universally unique identifier (UUID) of the IP access list. + ListId types.String `tfsdk:"list_id"` + // Type of IP access list. Valid values are as follows and are + // case-sensitive: + // + // * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block + // list. Exclude this IP or range. IP addresses in the block list are + // excluded even if they are included in an allow list. + ListType ListType `tfsdk:"list_type"` + // Update timestamp in milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // User ID of the user who updated this list. + UpdatedBy types.Int64 `tfsdk:"updated_by"` +} + +// IP access lists were successfully returned. +type ListIpAccessListResponse struct { + IpAccessLists []IpAccessListInfo `tfsdk:"ip_access_lists"` +} + +type ListNccAzurePrivateEndpointRulesResponse struct { + Items []NccAzurePrivateEndpointRule `tfsdk:"items"` + // A token that can be used to get the next page of results. If null, there + // are no more results to show. + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List network connectivity configurations +type ListNetworkConnectivityConfigurationsRequest struct { + // Pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListNetworkConnectivityConfigurationsResponse struct { + Items []NetworkConnectivityConfiguration `tfsdk:"items"` + // A token that can be used to get the next page of results. If null, there + // are no more results to show. + NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List private endpoint rules +type ListPrivateEndpointRulesRequest struct { + // Your Network Connectvity Configuration ID. + NetworkConnectivityConfigId types.String `tfsdk:"-" url:"-"` + // Pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListPublicTokensResponse struct { + // The information for each token. 
+ TokenInfos []PublicTokenInfo `tfsdk:"token_infos"` +} + +// List all tokens +type ListTokenManagementRequest struct { + // User ID of the user that created the token. + CreatedById types.Int64 `tfsdk:"-" url:"created_by_id,omitempty"` + // Username of the user that created the token. + CreatedByUsername types.String `tfsdk:"-" url:"created_by_username,omitempty"` +} + +// Tokens were successfully returned. +type ListTokensResponse struct { + // Token metadata of each user-created token in the workspace + TokenInfos []TokenInfo `tfsdk:"token_infos"` +} + +// Type of IP access list. Valid values are as follows and are case-sensitive: +// +// * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block list. +// Exclude this IP or range. IP addresses in the block list are excluded even if +// they are included in an allow list. +type ListType string + +// An allow list. Include this IP or range. +const ListTypeAllow ListType = `ALLOW` + +// A block list. Exclude this IP or range. IP addresses in the block list are +// excluded even if they are included in an allow list. +const ListTypeBlock ListType = `BLOCK` + +// String representation for [fmt.Print] +func (f *ListType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListType) Set(v string) error { + switch v { + case `ALLOW`, `BLOCK`: + *f = ListType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALLOW", "BLOCK"`, v) + } +} + +// Type always returns ListType to satisfy [pflag.Value] interface +func (f *ListType) Type() string { + return "ListType" +} + +// The stable AWS IP CIDR blocks. You can use these to configure the firewall of +// your resources to allow traffic from your Databricks workspace. +type NccAwsStableIpRule struct { + // The list of stable IP CIDR blocks from which Databricks network traffic + // originates when accessing your resources. + CidrBlocks []types.String `tfsdk:"cidr_blocks"` +} + +type NccAzurePrivateEndpointRule struct { + // The current status of this private endpoint. The private endpoint rules + // are effective only if the connection state is `ESTABLISHED`. Remember + // that you must approve new endpoints on your resources in the Azure portal + // before they take effect. + // + // The possible values are: - INIT: (deprecated) The endpoint has been + // created and pending approval. - PENDING: The endpoint has been created + // and pending approval. - ESTABLISHED: The endpoint has been approved and + // is ready to use in your serverless compute resources. - REJECTED: + // Connection was rejected by the private link resource owner. - + // DISCONNECTED: Connection was removed by the private link resource owner, + // the private endpoint becomes informative and should be deleted for + // clean-up. + ConnectionState NccAzurePrivateEndpointRuleConnectionState `tfsdk:"connection_state"` + // Time in epoch milliseconds when this object was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // Whether this private endpoint is deactivated. + Deactivated types.Bool `tfsdk:"deactivated"` + // Time in epoch milliseconds when this object was deactivated. + DeactivatedAt types.Int64 `tfsdk:"deactivated_at"` + // The name of the Azure private endpoint resource. + EndpointName types.String `tfsdk:"endpoint_name"` + // The sub-resource type (group ID) of the target resource. Note that to + // connect to workspace root storage (root DBFS), you need two endpoints, + // one for `blob` and one for `dfs`. 
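+	// The allowed values are the NccAzurePrivateEndpointRuleGroupId constants
+	// declared below (`blob`, `dfs`, `mysqlServer`, `sqlServer`).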
+ GroupId NccAzurePrivateEndpointRuleGroupId `tfsdk:"group_id"` + // The ID of a network connectivity configuration, which is the parent + // resource of this private endpoint rule object. + NetworkConnectivityConfigId types.String `tfsdk:"network_connectivity_config_id"` + // The Azure resource ID of the target resource. + ResourceId types.String `tfsdk:"resource_id"` + // The ID of a private endpoint rule. + RuleId types.String `tfsdk:"rule_id"` + // Time in epoch milliseconds when this object was updated. + UpdatedTime types.Int64 `tfsdk:"updated_time"` +} + +// The current status of this private endpoint. The private endpoint rules are +// effective only if the connection state is `ESTABLISHED`. Remember that you +// must approve new endpoints on your resources in the Azure portal before they +// take effect. +// +// The possible values are: - INIT: (deprecated) The endpoint has been created +// and pending approval. - PENDING: The endpoint has been created and pending +// approval. - ESTABLISHED: The endpoint has been approved and is ready to use +// in your serverless compute resources. - REJECTED: Connection was rejected by +// the private link resource owner. - DISCONNECTED: Connection was removed by +// the private link resource owner, the private endpoint becomes informative and +// should be deleted for clean-up. +type NccAzurePrivateEndpointRuleConnectionState string + +const NccAzurePrivateEndpointRuleConnectionStateDisconnected NccAzurePrivateEndpointRuleConnectionState = `DISCONNECTED` + +const NccAzurePrivateEndpointRuleConnectionStateEstablished NccAzurePrivateEndpointRuleConnectionState = `ESTABLISHED` + +const NccAzurePrivateEndpointRuleConnectionStateInit NccAzurePrivateEndpointRuleConnectionState = `INIT` + +const NccAzurePrivateEndpointRuleConnectionStatePending NccAzurePrivateEndpointRuleConnectionState = `PENDING` + +const NccAzurePrivateEndpointRuleConnectionStateRejected NccAzurePrivateEndpointRuleConnectionState = `REJECTED` + +// String representation for [fmt.Print] +func (f *NccAzurePrivateEndpointRuleConnectionState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *NccAzurePrivateEndpointRuleConnectionState) Set(v string) error { + switch v { + case `DISCONNECTED`, `ESTABLISHED`, `INIT`, `PENDING`, `REJECTED`: + *f = NccAzurePrivateEndpointRuleConnectionState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "INIT", "PENDING", "REJECTED"`, v) + } +} + +// Type always returns NccAzurePrivateEndpointRuleConnectionState to satisfy [pflag.Value] interface +func (f *NccAzurePrivateEndpointRuleConnectionState) Type() string { + return "NccAzurePrivateEndpointRuleConnectionState" +} + +// The sub-resource type (group ID) of the target resource. Note that to connect +// to workspace root storage (root DBFS), you need two endpoints, one for `blob` +// and one for `dfs`. 
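+//
+// A minimal sketch of how the generated [pflag.Value] helpers on this type can
+// be used (illustrative only; not part of the generated API surface):
+//
+//	var groupId NccAzurePrivateEndpointRuleGroupId
+//	if err := groupId.Set("blob"); err != nil {
+//		// unreachable for "blob": Set only fails for values outside the
+//		// allowed set
+//	}
+//	_ = groupId.String() // "blob"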
+type NccAzurePrivateEndpointRuleGroupId string
+
+const NccAzurePrivateEndpointRuleGroupIdBlob NccAzurePrivateEndpointRuleGroupId = `blob`
+
+const NccAzurePrivateEndpointRuleGroupIdDfs NccAzurePrivateEndpointRuleGroupId = `dfs`
+
+const NccAzurePrivateEndpointRuleGroupIdMysqlServer NccAzurePrivateEndpointRuleGroupId = `mysqlServer`
+
+const NccAzurePrivateEndpointRuleGroupIdSqlServer NccAzurePrivateEndpointRuleGroupId = `sqlServer`
+
+// String representation for [fmt.Print]
+func (f *NccAzurePrivateEndpointRuleGroupId) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *NccAzurePrivateEndpointRuleGroupId) Set(v string) error {
+	switch v {
+	case `blob`, `dfs`, `mysqlServer`, `sqlServer`:
+		*f = NccAzurePrivateEndpointRuleGroupId(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "blob", "dfs", "mysqlServer", "sqlServer"`, v)
+	}
+}
+
+// Type always returns NccAzurePrivateEndpointRuleGroupId to satisfy [pflag.Value] interface
+func (f *NccAzurePrivateEndpointRuleGroupId) Type() string {
+	return "NccAzurePrivateEndpointRuleGroupId"
+}
+
+// The stable Azure service endpoints. You can configure the firewall of your
+// Azure resources to allow traffic from your Databricks serverless compute
+// resources.
+type NccAzureServiceEndpointRule struct {
+	// The list of subnets from which Databricks network traffic originates when
+	// accessing your Azure resources.
+	Subnets []types.String `tfsdk:"subnets"`
+	// The Azure region in which this service endpoint rule applies.
+	TargetRegion types.String `tfsdk:"target_region"`
+	// The Azure services to which this service endpoint rule applies.
+	TargetServices []types.String `tfsdk:"target_services"`
+}
+
+// The network connectivity rules that apply to network traffic from your
+// serverless compute resources.
+type NccEgressConfig struct {
+	// The network connectivity rules that are applied by default without
+	// resource specific configurations. You can find the stable network
+	// information of your serverless compute resources here.
+	DefaultRules *NccEgressDefaultRules `tfsdk:"default_rules"`
+	// The network connectivity rules that are configured for each destination.
+	// These rules override the default rules.
+	TargetRules *NccEgressTargetRules `tfsdk:"target_rules"`
+}
+
+// The network connectivity rules that are applied by default without resource
+// specific configurations. You can find the stable network information of your
+// serverless compute resources here.
+type NccEgressDefaultRules struct {
+	// The stable AWS IP CIDR blocks. You can use these to configure the
+	// firewall of your resources to allow traffic from your Databricks
+	// workspace.
+	AwsStableIpRule *NccAwsStableIpRule `tfsdk:"aws_stable_ip_rule"`
+	// The stable Azure service endpoints. You can configure the firewall of
+	// your Azure resources to allow traffic from your Databricks serverless
+	// compute resources.
+	AzureServiceEndpointRule *NccAzureServiceEndpointRule `tfsdk:"azure_service_endpoint_rule"`
+}
+
+// The network connectivity rules that are configured for each destination.
+// These rules override the default rules.
+type NccEgressTargetRules struct {
+	AzurePrivateEndpointRules []NccAzurePrivateEndpointRule `tfsdk:"azure_private_endpoint_rules"`
+}
+
+type NetworkConnectivityConfiguration struct {
+	// The Databricks account ID that hosts the credential.
+ AccountId types.String `tfsdk:"account_id"` + // Time in epoch milliseconds when this object was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // The network connectivity rules that apply to network traffic from your + // serverless compute resources. + EgressConfig *NccEgressConfig `tfsdk:"egress_config"` + // The name of the network connectivity configuration. The name can contain + // alphanumeric characters, hyphens, and underscores. The length must be + // between 3 and 30 characters. The name must match the regular expression + // `^[0-9a-zA-Z-_]{3,30}$`. + Name types.String `tfsdk:"name"` + // Databricks network connectivity configuration ID. + NetworkConnectivityConfigId types.String `tfsdk:"network_connectivity_config_id"` + // The region for the network connectivity configuration. Only workspaces in + // the same region can be attached to the network connectivity + // configuration. + Region types.String `tfsdk:"region"` + // Time in epoch milliseconds when this object was updated. + UpdatedTime types.Int64 `tfsdk:"updated_time"` +} + +// Partition by workspace or account +type PartitionId struct { + // The ID of the workspace. + WorkspaceId types.Int64 `tfsdk:"workspaceId"` +} + +type PersonalComputeMessage struct { + // ON: Grants all users in all workspaces access to the Personal Compute + // default policy, allowing all users to create single-machine compute + // resources. DELEGATE: Moves access control for the Personal Compute + // default policy to individual workspaces and requires a workspace’s + // users or groups to be added to the ACLs of that workspace’s Personal + // Compute default policy before they will be able to create compute + // resources through that policy. + Value PersonalComputeMessageEnum `tfsdk:"value"` +} + +// ON: Grants all users in all workspaces access to the Personal Compute default +// policy, allowing all users to create single-machine compute resources. +// DELEGATE: Moves access control for the Personal Compute default policy to +// individual workspaces and requires a workspace’s users or groups to be +// added to the ACLs of that workspace’s Personal Compute default policy +// before they will be able to create compute resources through that policy. +type PersonalComputeMessageEnum string + +const PersonalComputeMessageEnumDelegate PersonalComputeMessageEnum = `DELEGATE` + +const PersonalComputeMessageEnumOn PersonalComputeMessageEnum = `ON` + +// String representation for [fmt.Print] +func (f *PersonalComputeMessageEnum) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PersonalComputeMessageEnum) Set(v string) error { + switch v { + case `DELEGATE`, `ON`: + *f = PersonalComputeMessageEnum(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELEGATE", "ON"`, v) + } +} + +// Type always returns PersonalComputeMessageEnum to satisfy [pflag.Value] interface +func (f *PersonalComputeMessageEnum) Type() string { + return "PersonalComputeMessageEnum" +} + +type PersonalComputeSetting struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. 
That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag"` + + PersonalCompute PersonalComputeMessage `tfsdk:"personal_compute"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name"` +} + +type PublicTokenInfo struct { + // Comment the token was created with, if applicable. + Comment types.String `tfsdk:"comment"` + // Server time (in epoch milliseconds) when the token was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // Server time (in epoch milliseconds) when the token will expire, or -1 if + // not applicable. + ExpiryTime types.Int64 `tfsdk:"expiry_time"` + // The ID of this token. + TokenId types.String `tfsdk:"token_id"` +} + +// Details required to replace an IP access list. +type ReplaceIpAccessList struct { + // Specifies whether this IP access list is enabled. + Enabled types.Bool `tfsdk:"enabled"` + // The ID for the corresponding IP access list + IpAccessListId types.String `tfsdk:"-" url:"-"` + + IpAddresses []types.String `tfsdk:"ip_addresses"` + // Label for the IP access list. This **cannot** be empty. + Label types.String `tfsdk:"label"` + // Type of IP access list. Valid values are as follows and are + // case-sensitive: + // + // * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block + // list. Exclude this IP or range. IP addresses in the block list are + // excluded even if they are included in an allow list. + ListType ListType `tfsdk:"list_type"` +} + +type ReplaceResponse struct { +} + +type RestrictWorkspaceAdminsMessage struct { + Status RestrictWorkspaceAdminsMessageStatus `tfsdk:"status"` +} + +type RestrictWorkspaceAdminsMessageStatus string + +const RestrictWorkspaceAdminsMessageStatusAllowAll RestrictWorkspaceAdminsMessageStatus = `ALLOW_ALL` + +const RestrictWorkspaceAdminsMessageStatusRestrictTokensAndJobRunAs RestrictWorkspaceAdminsMessageStatus = `RESTRICT_TOKENS_AND_JOB_RUN_AS` + +const RestrictWorkspaceAdminsMessageStatusStatusUnspecified RestrictWorkspaceAdminsMessageStatus = `STATUS_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *RestrictWorkspaceAdminsMessageStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RestrictWorkspaceAdminsMessageStatus) Set(v string) error { + switch v { + case `ALLOW_ALL`, `RESTRICT_TOKENS_AND_JOB_RUN_AS`, `STATUS_UNSPECIFIED`: + *f = RestrictWorkspaceAdminsMessageStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALLOW_ALL", "RESTRICT_TOKENS_AND_JOB_RUN_AS", "STATUS_UNSPECIFIED"`, v) + } +} + +// Type always returns RestrictWorkspaceAdminsMessageStatus to satisfy [pflag.Value] interface +func (f *RestrictWorkspaceAdminsMessageStatus) Type() string { + return "RestrictWorkspaceAdminsMessageStatus" +} + +type RestrictWorkspaceAdminsSetting struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag"` + + RestrictWorkspaceAdmins RestrictWorkspaceAdminsMessage `tfsdk:"restrict_workspace_admins"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name"` +} + +type RevokeTokenRequest struct { + // The ID of the token to be revoked. + TokenId types.String `tfsdk:"token_id"` +} + +type RevokeTokenResponse struct { +} + +type SetStatusResponse struct { +} + +type StringMessage struct { + // Represents a generic string value. + Value types.String `tfsdk:"value"` +} + +type TokenAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel TokenPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type TokenAccessControlResponse struct { + // All permissions. + AllPermissions []TokenPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type TokenInfo struct { + // Comment that describes the purpose of the token, specified by the token + // creator. + Comment types.String `tfsdk:"comment"` + // User ID of the user that created the token. + CreatedById types.Int64 `tfsdk:"created_by_id"` + // Username of the user that created the token. + CreatedByUsername types.String `tfsdk:"created_by_username"` + // Timestamp when the token was created. + CreationTime types.Int64 `tfsdk:"creation_time"` + // Timestamp when the token expires. + ExpiryTime types.Int64 `tfsdk:"expiry_time"` + // User ID of the user that owns the token. + OwnerId types.Int64 `tfsdk:"owner_id"` + // ID of the token. 
+ TokenId types.String `tfsdk:"token_id"` +} + +type TokenPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel TokenPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type TokenPermissionLevel string + +const TokenPermissionLevelCanUse TokenPermissionLevel = `CAN_USE` + +// String representation for [fmt.Print] +func (f *TokenPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TokenPermissionLevel) Set(v string) error { + switch v { + case `CAN_USE`: + *f = TokenPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_USE"`, v) + } +} + +// Type always returns TokenPermissionLevel to satisfy [pflag.Value] interface +func (f *TokenPermissionLevel) Type() string { + return "TokenPermissionLevel" +} + +type TokenPermissions struct { + AccessControlList []TokenAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type TokenPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel TokenPermissionLevel `tfsdk:"permission_level"` +} + +type TokenPermissionsRequest struct { + AccessControlList []TokenAccessControlRequest `tfsdk:"access_control_list"` +} + +// The type of token request. As of now, only `AZURE_ACTIVE_DIRECTORY_TOKEN` is +// supported. +type TokenType string + +const TokenTypeAzureActiveDirectoryToken TokenType = `AZURE_ACTIVE_DIRECTORY_TOKEN` + +// String representation for [fmt.Print] +func (f *TokenType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TokenType) Set(v string) error { + switch v { + case `AZURE_ACTIVE_DIRECTORY_TOKEN`: + *f = TokenType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AZURE_ACTIVE_DIRECTORY_TOKEN"`, v) + } +} + +// Type always returns TokenType to satisfy [pflag.Value] interface +func (f *TokenType) Type() string { + return "TokenType" +} + +// Details required to update a setting. +type UpdateAutomaticClusterUpdateSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask"` + + Setting AutomaticClusterUpdateSetting `tfsdk:"setting"` +} + +// Details required to update a setting. +type UpdateComplianceSecurityProfileSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). 
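+	// For example, a mask that updates two fields would be written as
+	// `first_field,second_field` (the field names here are placeholders, not
+	// actual setting fields).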
+ FieldMask types.String `tfsdk:"field_mask"` + + Setting ComplianceSecurityProfileSetting `tfsdk:"setting"` +} + +// Details required to update a setting. +type UpdateCspEnablementAccountSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask"` + + Setting CspEnablementAccountSetting `tfsdk:"setting"` +} + +// Details required to update a setting. +type UpdateDefaultNamespaceSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask"` + // This represents the setting configuration for the default namespace in + // the Databricks workspace. Setting the default catalog for the workspace + // determines the catalog that is used when queries do not reference a fully + // qualified 3 level name. For example, if the default catalog is set to + // 'retail_prod' then a query 'SELECT * FROM myTable' would reference the + // object 'retail_prod.default.myTable' (the schema 'default' is always + // assumed). This setting requires a restart of clusters and SQL warehouses + // to take effect. Additionally, the default namespace only applies when + // using Unity Catalog-enabled compute. + Setting DefaultNamespaceSetting `tfsdk:"setting"` +} + +// Details required to update a setting. +type UpdateEnhancedSecurityMonitoringSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask"` + + Setting EnhancedSecurityMonitoringSetting `tfsdk:"setting"` +} + +// Details required to update a setting. +type UpdateEsmEnablementAccountSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask"` + + Setting EsmEnablementAccountSetting `tfsdk:"setting"` +} + +// Details required to update an IP access list. +type UpdateIpAccessList struct { + // Specifies whether this IP access list is enabled. 
+ Enabled types.Bool `tfsdk:"enabled"` + // The ID for the corresponding IP access list + IpAccessListId types.String `tfsdk:"-" url:"-"` + + IpAddresses []types.String `tfsdk:"ip_addresses"` + // Label for the IP access list. This **cannot** be empty. + Label types.String `tfsdk:"label"` + // Type of IP access list. Valid values are as follows and are + // case-sensitive: + // + // * `ALLOW`: An allow list. Include this IP or range. * `BLOCK`: A block + // list. Exclude this IP or range. IP addresses in the block list are + // excluded even if they are included in an allow list. + ListType ListType `tfsdk:"list_type"` +} + +// Details required to update a setting. +type UpdatePersonalComputeSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask"` + + Setting PersonalComputeSetting `tfsdk:"setting"` +} + +type UpdateResponse struct { +} + +// Details required to update a setting. +type UpdateRestrictWorkspaceAdminsSettingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing"` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask"` + + Setting RestrictWorkspaceAdminsSetting `tfsdk:"setting"` +} + +type WorkspaceConf map[string]types.String diff --git a/service/sharing_tf/model.go b/service/sharing_tf/model.go new file mode 100755 index 0000000000..cdd9818644 --- /dev/null +++ b/service/sharing_tf/model.go @@ -0,0 +1,1042 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package sharing_tf + +import ( + "fmt" + + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// The delta sharing authentication type. 
+type AuthenticationType string + +const AuthenticationTypeDatabricks AuthenticationType = `DATABRICKS` + +const AuthenticationTypeToken AuthenticationType = `TOKEN` + +// String representation for [fmt.Print] +func (f *AuthenticationType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AuthenticationType) Set(v string) error { + switch v { + case `DATABRICKS`, `TOKEN`: + *f = AuthenticationType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATABRICKS", "TOKEN"`, v) + } +} + +// Type always returns AuthenticationType to satisfy [pflag.Value] interface +func (f *AuthenticationType) Type() string { + return "AuthenticationType" +} + +type CentralCleanRoomInfo struct { + // All assets from all collaborators that are available in the clean room. + // Only one of table_info or notebook_info will be filled in. + CleanRoomAssets []CleanRoomAssetInfo `tfsdk:"clean_room_assets"` + // All collaborators who are in the clean room. + Collaborators []CleanRoomCollaboratorInfo `tfsdk:"collaborators"` + // The collaborator who created the clean room. + Creator *CleanRoomCollaboratorInfo `tfsdk:"creator"` + // The cloud where clean room tasks will be run. + StationCloud types.String `tfsdk:"station_cloud"` + // The region where clean room tasks will be run. + StationRegion types.String `tfsdk:"station_region"` +} + +type CleanRoomAssetInfo struct { + // Time at which this asset was added, in epoch milliseconds. + AddedAt types.Int64 `tfsdk:"added_at"` + // Details about the notebook asset. + NotebookInfo *CleanRoomNotebookInfo `tfsdk:"notebook_info"` + // The collaborator who owns the asset. + Owner *CleanRoomCollaboratorInfo `tfsdk:"owner"` + // Details about the table asset. + TableInfo *CleanRoomTableInfo `tfsdk:"table_info"` + // Time at which this asset was updated, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` +} + +type CleanRoomCatalog struct { + // Name of the catalog in the clean room station. Empty for notebooks. + CatalogName types.String `tfsdk:"catalog_name"` + // The details of the shared notebook files. + NotebookFiles []SharedDataObject `tfsdk:"notebook_files"` + // The details of the shared tables. + Tables []SharedDataObject `tfsdk:"tables"` +} + +type CleanRoomCatalogUpdate struct { + // The name of the catalog to update assets. + CatalogName types.String `tfsdk:"catalog_name"` + // The updates to the assets in the catalog. + Updates *SharedDataObjectUpdate `tfsdk:"updates"` +} + +type CleanRoomCollaboratorInfo struct { + // The global Unity Catalog metastore id of the collaborator. Also known as + // the sharing identifier. The identifier is of format + // __cloud__:__region__:__metastore-uuid__. + GlobalMetastoreId types.String `tfsdk:"global_metastore_id"` + // The organization name of the collaborator. This is configured in the + // metastore for Delta Sharing and is used to identify the organization to + // other collaborators. + OrganizationName types.String `tfsdk:"organization_name"` +} + +type CleanRoomInfo struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Time at which this clean room was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of clean room creator. + CreatedBy types.String `tfsdk:"created_by"` + // Catalog aliases shared by the current collaborator with asset details. + LocalCatalogs []CleanRoomCatalog `tfsdk:"local_catalogs"` + // Name of the clean room. 
+ Name types.String `tfsdk:"name"` + // Username of current owner of clean room. + Owner types.String `tfsdk:"owner"` + // Central clean room details. + RemoteDetailedInfo *CentralCleanRoomInfo `tfsdk:"remote_detailed_info"` + // Time at which this clean room was updated, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of clean room updater. + UpdatedBy types.String `tfsdk:"updated_by"` +} + +type CleanRoomNotebookInfo struct { + // The base64 representation of the notebook content in HTML. + NotebookContent types.String `tfsdk:"notebook_content"` + // The name of the notebook. + NotebookName types.String `tfsdk:"notebook_name"` +} + +type CleanRoomTableInfo struct { + // Name of parent catalog. + CatalogName types.String `tfsdk:"catalog_name"` + // The array of __ColumnInfo__ definitions of the table's columns. + Columns []ColumnInfo `tfsdk:"columns"` + // Full name of table, in form of + // __catalog_name__.__schema_name__.__table_name__ + FullName types.String `tfsdk:"full_name"` + // Name of table, relative to parent schema. + Name types.String `tfsdk:"name"` + // Name of parent schema relative to its parent catalog. + SchemaName types.String `tfsdk:"schema_name"` +} + +type ColumnInfo struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + + Mask *ColumnMask `tfsdk:"mask"` + // Name of Column. + Name types.String `tfsdk:"name"` + // Whether field may be Null (default: true). + Nullable types.Bool `tfsdk:"nullable"` + // Partition index for column. + PartitionIndex types.Int64 `tfsdk:"partition_index"` + // Ordinal position of column (starting at position 0). + Position types.Int64 `tfsdk:"position"` + // Format of IntervalType. + TypeIntervalType types.String `tfsdk:"type_interval_type"` + // Full data type specification, JSON-serialized. + TypeJson types.String `tfsdk:"type_json"` + // Name of type (INT, STRUCT, MAP, etc.). + TypeName ColumnTypeName `tfsdk:"type_name"` + // Digits of precision; required for DecimalTypes. + TypePrecision types.Int64 `tfsdk:"type_precision"` + // Digits to right of decimal; Required for DecimalTypes. + TypeScale types.Int64 `tfsdk:"type_scale"` + // Full data type specification as SQL/catalogString text. + TypeText types.String `tfsdk:"type_text"` +} + +type ColumnMask struct { + // The full name of the column mask SQL UDF. + FunctionName types.String `tfsdk:"function_name"` + // The list of additional table columns to be passed as input to the column + // mask function. The first arg of the mask function should be of the type + // of the column being masked and the types of the rest of the args should + // match the types of columns in 'using_column_names'. + UsingColumnNames []types.String `tfsdk:"using_column_names"` +} + +// Name of type (INT, STRUCT, MAP, etc.). 
+type ColumnTypeName string + +const ColumnTypeNameArray ColumnTypeName = `ARRAY` + +const ColumnTypeNameBinary ColumnTypeName = `BINARY` + +const ColumnTypeNameBoolean ColumnTypeName = `BOOLEAN` + +const ColumnTypeNameByte ColumnTypeName = `BYTE` + +const ColumnTypeNameChar ColumnTypeName = `CHAR` + +const ColumnTypeNameDate ColumnTypeName = `DATE` + +const ColumnTypeNameDecimal ColumnTypeName = `DECIMAL` + +const ColumnTypeNameDouble ColumnTypeName = `DOUBLE` + +const ColumnTypeNameFloat ColumnTypeName = `FLOAT` + +const ColumnTypeNameInt ColumnTypeName = `INT` + +const ColumnTypeNameInterval ColumnTypeName = `INTERVAL` + +const ColumnTypeNameLong ColumnTypeName = `LONG` + +const ColumnTypeNameMap ColumnTypeName = `MAP` + +const ColumnTypeNameNull ColumnTypeName = `NULL` + +const ColumnTypeNameShort ColumnTypeName = `SHORT` + +const ColumnTypeNameString ColumnTypeName = `STRING` + +const ColumnTypeNameStruct ColumnTypeName = `STRUCT` + +const ColumnTypeNameTableType ColumnTypeName = `TABLE_TYPE` + +const ColumnTypeNameTimestamp ColumnTypeName = `TIMESTAMP` + +const ColumnTypeNameTimestampNtz ColumnTypeName = `TIMESTAMP_NTZ` + +const ColumnTypeNameUserDefinedType ColumnTypeName = `USER_DEFINED_TYPE` + +// String representation for [fmt.Print] +func (f *ColumnTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`: + *f = ColumnTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE"`, v) + } +} + +// Type always returns ColumnTypeName to satisfy [pflag.Value] interface +func (f *ColumnTypeName) Type() string { + return "ColumnTypeName" +} + +type CreateCleanRoom struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Name of the clean room. + Name types.String `tfsdk:"name"` + // Central clean room details. + RemoteDetailedInfo CentralCleanRoomInfo `tfsdk:"remote_detailed_info"` +} + +type CreateProvider struct { + // The delta sharing authentication type. + AuthenticationType AuthenticationType `tfsdk:"authentication_type"` + // Description about the provider. + Comment types.String `tfsdk:"comment"` + // The name of the Provider. + Name types.String `tfsdk:"name"` + // This field is required when the __authentication_type__ is **TOKEN** or + // not provided. + RecipientProfileStr types.String `tfsdk:"recipient_profile_str"` +} + +type CreateRecipient struct { + // The delta sharing authentication type. + AuthenticationType AuthenticationType `tfsdk:"authentication_type"` + // Description about the recipient. + Comment types.String `tfsdk:"comment"` + // The global Unity Catalog metastore id provided by the data recipient. + // This field is required when the __authentication_type__ is + // **DATABRICKS**. The identifier is of format + // __cloud__:__region__:__metastore-uuid__. 
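+	// An illustrative value following that format would be
+	// `aws:us-west-2:19a85dee-54bc-43a2-87ab-023d0ec16013` (not a real
+	// metastore id).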
+ DataRecipientGlobalMetastoreId types.String `tfsdk:"data_recipient_global_metastore_id"` + // IP Access List + IpAccessList *IpAccessList `tfsdk:"ip_access_list"` + // Name of Recipient. + Name types.String `tfsdk:"name"` + // Username of the recipient owner. + Owner types.String `tfsdk:"owner"` + // Recipient properties as map of string key-value pairs. + PropertiesKvpairs *SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs"` + // The one-time sharing code provided by the data recipient. This field is + // required when the __authentication_type__ is **DATABRICKS**. + SharingCode types.String `tfsdk:"sharing_code"` +} + +type CreateShare struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Name of the share. + Name types.String `tfsdk:"name"` + // Storage root URL for the share. + StorageRoot types.String `tfsdk:"storage_root"` +} + +// Delete a clean room +type DeleteCleanRoomRequest struct { + // The name of the clean room. + Name types.String `tfsdk:"-" url:"-"` +} + +// Delete a provider +type DeleteProviderRequest struct { + // Name of the provider. + Name types.String `tfsdk:"-" url:"-"` +} + +// Delete a share recipient +type DeleteRecipientRequest struct { + // Name of the recipient. + Name types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete a share +type DeleteShareRequest struct { + // The name of the share. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get a share activation URL +type GetActivationUrlInfoRequest struct { + // The one time activation url. It also accepts activation token. + ActivationUrl types.String `tfsdk:"-" url:"-"` +} + +type GetActivationUrlInfoResponse struct { +} + +// Get a clean room +type GetCleanRoomRequest struct { + // Whether to include remote details (central) on the clean room. + IncludeRemoteDetails types.Bool `tfsdk:"-" url:"include_remote_details,omitempty"` + // The name of the clean room. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get a provider +type GetProviderRequest struct { + // Name of the provider. + Name types.String `tfsdk:"-" url:"-"` +} + +// Get a share recipient +type GetRecipientRequest struct { + // Name of the recipient. + Name types.String `tfsdk:"-" url:"-"` +} + +type GetRecipientSharePermissionsResponse struct { + // An array of data share permissions for a recipient. + PermissionsOut []ShareToPrivilegeAssignment `tfsdk:"permissions_out"` +} + +// Get a share +type GetShareRequest struct { + // Query for data to include in the share. + IncludeSharedData types.Bool `tfsdk:"-" url:"include_shared_data,omitempty"` + // The name of the share. + Name types.String `tfsdk:"-" url:"-"` +} + +type IpAccessList struct { + // Allowed IP Addresses in CIDR notation. Limit of 100. + AllowedIpAddresses []types.String `tfsdk:"allowed_ip_addresses"` +} + +// List clean rooms +type ListCleanRoomsRequest struct { + // Maximum number of clean rooms to return. If not set, all the clean rooms + // are returned (not recommended). - when set to a value greater than 0, the + // page length is the minimum of this value and a server configured value; - + // when set to 0, the page length is set to a server configured value + // (recommended); - when set to a value less than 0, an invalid parameter + // error is returned; + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // Opaque pagination token to go to next page based on previous query. 
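+	// Typically this is the `next_page_token` value returned by the previous
+	// ListCleanRoomsResponse.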
+ PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListCleanRoomsResponse struct { + // An array of clean rooms. Remote details (central) are not included. + CleanRooms []CleanRoomInfo `tfsdk:"clean_rooms"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token"` +} + +type ListProviderSharesResponse struct { + // An array of provider shares. + Shares []ProviderShare `tfsdk:"shares"` +} + +// List providers +type ListProvidersRequest struct { + // If not provided, all providers will be returned. If no providers exist + // with this ID, no results will be returned. + DataProviderGlobalMetastoreId types.String `tfsdk:"-" url:"data_provider_global_metastore_id,omitempty"` +} + +type ListProvidersResponse struct { + // An array of provider information objects. + Providers []ProviderInfo `tfsdk:"providers"` +} + +// List share recipients +type ListRecipientsRequest struct { + // If not provided, all recipients will be returned. If no recipients exist + // with this ID, no results will be returned. + DataRecipientGlobalMetastoreId types.String `tfsdk:"-" url:"data_recipient_global_metastore_id,omitempty"` +} + +type ListRecipientsResponse struct { + // An array of recipient information objects. + Recipients []RecipientInfo `tfsdk:"recipients"` +} + +// List shares by Provider +type ListSharesRequest struct { + // Name of the provider in which to list shares. + Name types.String `tfsdk:"-" url:"-"` +} + +type ListSharesResponse struct { + // An array of data share information objects. + Shares []ShareInfo `tfsdk:"shares"` +} + +type Partition struct { + // An array of partition values. + Values []PartitionValue `tfsdk:"values"` +} + +type PartitionValue struct { + // The name of the partition column. + Name types.String `tfsdk:"name"` + // The operator to apply for the value. + Op PartitionValueOp `tfsdk:"op"` + // The key of a Delta Sharing recipient's property. For example + // `databricks-account-id`. When this field is set, field `value` can not be + // set. + RecipientPropertyKey types.String `tfsdk:"recipient_property_key"` + // The value of the partition column. When this value is not set, it means + // `null` value. When this field is set, field `recipient_property_key` can + // not be set. + Value types.String `tfsdk:"value"` +} + +// The operator to apply for the value. 
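+//
+// A minimal usage sketch (illustrative only, not part of the generated API):
+// it shows how a partition filter over the types above might be assembled.
+// `types.StringValue` is assumed to be the terraform-plugin-framework
+// constructor for `types.String`, and the column name and value are made up:
+//
+//	p := Partition{Values: []PartitionValue{{
+//		Name:  types.StringValue("country"),
+//		Op:    PartitionValueOpEqual,
+//		Value: types.StringValue("US"),
+//	}}}
+//	_ = p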
+type PartitionValueOp string + +const PartitionValueOpEqual PartitionValueOp = `EQUAL` + +const PartitionValueOpLike PartitionValueOp = `LIKE` + +// String representation for [fmt.Print] +func (f *PartitionValueOp) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PartitionValueOp) Set(v string) error { + switch v { + case `EQUAL`, `LIKE`: + *f = PartitionValueOp(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EQUAL", "LIKE"`, v) + } +} + +// Type always returns PartitionValueOp to satisfy [pflag.Value] interface +func (f *PartitionValueOp) Type() string { + return "PartitionValueOp" +} + +type Privilege string + +const PrivilegeAccess Privilege = `ACCESS` + +const PrivilegeAllPrivileges Privilege = `ALL_PRIVILEGES` + +const PrivilegeApplyTag Privilege = `APPLY_TAG` + +const PrivilegeCreate Privilege = `CREATE` + +const PrivilegeCreateCatalog Privilege = `CREATE_CATALOG` + +const PrivilegeCreateConnection Privilege = `CREATE_CONNECTION` + +const PrivilegeCreateExternalLocation Privilege = `CREATE_EXTERNAL_LOCATION` + +const PrivilegeCreateExternalTable Privilege = `CREATE_EXTERNAL_TABLE` + +const PrivilegeCreateExternalVolume Privilege = `CREATE_EXTERNAL_VOLUME` + +const PrivilegeCreateForeignCatalog Privilege = `CREATE_FOREIGN_CATALOG` + +const PrivilegeCreateFunction Privilege = `CREATE_FUNCTION` + +const PrivilegeCreateManagedStorage Privilege = `CREATE_MANAGED_STORAGE` + +const PrivilegeCreateMaterializedView Privilege = `CREATE_MATERIALIZED_VIEW` + +const PrivilegeCreateModel Privilege = `CREATE_MODEL` + +const PrivilegeCreateProvider Privilege = `CREATE_PROVIDER` + +const PrivilegeCreateRecipient Privilege = `CREATE_RECIPIENT` + +const PrivilegeCreateSchema Privilege = `CREATE_SCHEMA` + +const PrivilegeCreateServiceCredential Privilege = `CREATE_SERVICE_CREDENTIAL` + +const PrivilegeCreateShare Privilege = `CREATE_SHARE` + +const PrivilegeCreateStorageCredential Privilege = `CREATE_STORAGE_CREDENTIAL` + +const PrivilegeCreateTable Privilege = `CREATE_TABLE` + +const PrivilegeCreateView Privilege = `CREATE_VIEW` + +const PrivilegeCreateVolume Privilege = `CREATE_VOLUME` + +const PrivilegeExecute Privilege = `EXECUTE` + +const PrivilegeManageAllowlist Privilege = `MANAGE_ALLOWLIST` + +const PrivilegeModify Privilege = `MODIFY` + +const PrivilegeReadFiles Privilege = `READ_FILES` + +const PrivilegeReadPrivateFiles Privilege = `READ_PRIVATE_FILES` + +const PrivilegeReadVolume Privilege = `READ_VOLUME` + +const PrivilegeRefresh Privilege = `REFRESH` + +const PrivilegeSelect Privilege = `SELECT` + +const PrivilegeSetSharePermission Privilege = `SET_SHARE_PERMISSION` + +const PrivilegeUsage Privilege = `USAGE` + +const PrivilegeUseCatalog Privilege = `USE_CATALOG` + +const PrivilegeUseConnection Privilege = `USE_CONNECTION` + +const PrivilegeUseMarketplaceAssets Privilege = `USE_MARKETPLACE_ASSETS` + +const PrivilegeUseProvider Privilege = `USE_PROVIDER` + +const PrivilegeUseRecipient Privilege = `USE_RECIPIENT` + +const PrivilegeUseSchema Privilege = `USE_SCHEMA` + +const PrivilegeUseShare Privilege = `USE_SHARE` + +const PrivilegeWriteFiles Privilege = `WRITE_FILES` + +const PrivilegeWritePrivateFiles Privilege = `WRITE_PRIVATE_FILES` + +const PrivilegeWriteVolume Privilege = `WRITE_VOLUME` + +// String representation for [fmt.Print] +func (f *Privilege) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Privilege) Set(v string) 
error {
+ switch v {
+ case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`:
+ *f = Privilege(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v)
+ }
+}
+
+// Type always returns Privilege to satisfy [pflag.Value] interface
+func (f *Privilege) Type() string {
+ return "Privilege"
+}
+
+type PrivilegeAssignment struct {
+ // The principal (user email address or group name).
+ Principal types.String `tfsdk:"principal"`
+ // The privileges assigned to the principal.
+ Privileges []Privilege `tfsdk:"privileges"`
+}
+
+type ProviderInfo struct {
+ // The delta sharing authentication type.
+ AuthenticationType AuthenticationType `tfsdk:"authentication_type"`
+ // Cloud vendor of the provider's UC metastore. This field is only present
+ // when the __authentication_type__ is **DATABRICKS**.
+ Cloud types.String `tfsdk:"cloud"`
+ // Description about the provider.
+ Comment types.String `tfsdk:"comment"`
+ // Time at which this Provider was created, in epoch milliseconds.
+ CreatedAt types.Int64 `tfsdk:"created_at"`
+ // Username of Provider creator.
+ CreatedBy types.String `tfsdk:"created_by"`
+ // The global UC metastore id of the data provider. This field is only
+ // present when the __authentication_type__ is **DATABRICKS**. The
+ // identifier is of format <cloud>:<region>:<metastore-uuid>.
+ DataProviderGlobalMetastoreId types.String `tfsdk:"data_provider_global_metastore_id"`
+ // UUID of the provider's UC metastore. This field is only present when the
+ // __authentication_type__ is **DATABRICKS**.
+ MetastoreId types.String `tfsdk:"metastore_id"`
+ // The name of the Provider.
+ Name types.String `tfsdk:"name"`
+ // Username of Provider owner.
+ Owner types.String `tfsdk:"owner"`
+ // The recipient profile. This field is only present when the
+ // authentication_type is `TOKEN`.
+ RecipientProfile *RecipientProfile `tfsdk:"recipient_profile"`
+ // This field is only present when the authentication_type is `TOKEN` or not
+ // provided.
+ RecipientProfileStr types.String `tfsdk:"recipient_profile_str"`
+ // Cloud region of the provider's UC metastore. This field is only present
+ // when the __authentication_type__ is **DATABRICKS**.
+ Region types.String `tfsdk:"region"`
+ // Time at which this Provider was updated, in epoch milliseconds.
+ UpdatedAt types.Int64 `tfsdk:"updated_at"`
+ // Username of user who last modified Share.
+ UpdatedBy types.String `tfsdk:"updated_by"`
+}
+
+type ProviderShare struct {
+ // The name of the Provider Share.
+ Name types.String `tfsdk:"name"`
+}
+
+type RecipientInfo struct {
+ // A boolean status field showing whether the Recipient's activation URL has
+ // been exercised or not.
+ Activated types.Bool `tfsdk:"activated"`
+ // Full activation url to retrieve the access token. It will be empty if the
+ // token is already retrieved.
+ ActivationUrl types.String `tfsdk:"activation_url"`
+ // The delta sharing authentication type.
+ AuthenticationType AuthenticationType `tfsdk:"authentication_type"`
+ // Cloud vendor of the recipient's Unity Catalog Metastore. This field is
+ // only present when the __authentication_type__ is **DATABRICKS**.
+ Cloud types.String `tfsdk:"cloud"`
+ // Description about the recipient.
+ Comment types.String `tfsdk:"comment"`
+ // Time at which this recipient was created, in epoch milliseconds.
+ CreatedAt types.Int64 `tfsdk:"created_at"`
+ // Username of recipient creator.
+ CreatedBy types.String `tfsdk:"created_by"`
+ // The global Unity Catalog metastore id provided by the data recipient.
+ // This field is only present when the __authentication_type__ is
+ // **DATABRICKS**. The identifier is of format
+ // __cloud__:__region__:__metastore-uuid__.
+ DataRecipientGlobalMetastoreId types.String `tfsdk:"data_recipient_global_metastore_id"`
+ // IP Access List
+ IpAccessList *IpAccessList `tfsdk:"ip_access_list"`
+ // Unique identifier of recipient's Unity Catalog metastore. This field is
+ // only present when the __authentication_type__ is **DATABRICKS**.
+ MetastoreId types.String `tfsdk:"metastore_id"`
+ // Name of Recipient.
+ Name types.String `tfsdk:"name"`
+ // Username of the recipient owner.
+ Owner types.String `tfsdk:"owner"`
+ // Recipient properties as map of string key-value pairs.
+ PropertiesKvpairs *SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs"`
+ // Cloud region of the recipient's Unity Catalog Metastore. This field is
+ // only present when the __authentication_type__ is **DATABRICKS**.
+ Region types.String `tfsdk:"region"`
+ // The one-time sharing code provided by the data recipient. This field is
+ // only present when the __authentication_type__ is **DATABRICKS**.
+ SharingCode types.String `tfsdk:"sharing_code"`
+ // This field is only present when the __authentication_type__ is **TOKEN**.
+ Tokens []RecipientTokenInfo `tfsdk:"tokens"`
+ // Time at which the recipient was updated, in epoch milliseconds.
+ UpdatedAt types.Int64 `tfsdk:"updated_at"`
+ // Username of recipient updater.
+ UpdatedBy types.String `tfsdk:"updated_by"`
+}
+
+type RecipientProfile struct {
+ // The token used to authorize the recipient.
+ BearerToken types.String `tfsdk:"bearer_token"`
+ // The endpoint for the share to be used by the recipient.
+ Endpoint types.String `tfsdk:"endpoint"`
+ // The version number of the recipient's credentials on a share.
+ ShareCredentialsVersion types.Int64 `tfsdk:"share_credentials_version"`
+}
+
+type RecipientTokenInfo struct {
+ // Full activation URL to retrieve the access token.
It will be empty if the + // token is already retrieved. + ActivationUrl types.String `tfsdk:"activation_url"` + // Time at which this recipient Token was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of recipient token creator. + CreatedBy types.String `tfsdk:"created_by"` + // Expiration timestamp of the token in epoch milliseconds. + ExpirationTime types.Int64 `tfsdk:"expiration_time"` + // Unique ID of the recipient token. + Id types.String `tfsdk:"id"` + // Time at which this recipient Token was updated, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of recipient Token updater. + UpdatedBy types.String `tfsdk:"updated_by"` +} + +// Get an access token +type RetrieveTokenRequest struct { + // The one time activation url. It also accepts activation token. + ActivationUrl types.String `tfsdk:"-" url:"-"` +} + +type RetrieveTokenResponse struct { + // The token used to authorize the recipient. + BearerToken types.String `tfsdk:"bearerToken"` + // The endpoint for the share to be used by the recipient. + Endpoint types.String `tfsdk:"endpoint"` + // Expiration timestamp of the token in epoch milliseconds. + ExpirationTime types.String `tfsdk:"expirationTime"` + // These field names must follow the delta sharing protocol. + ShareCredentialsVersion types.Int64 `tfsdk:"shareCredentialsVersion"` +} + +type RotateRecipientToken struct { + // The expiration time of the bearer token in ISO 8601 format. This will set + // the expiration_time of existing token only to a smaller timestamp, it + // cannot extend the expiration_time. Use 0 to expire the existing token + // immediately, negative number will return an error. + ExistingTokenExpireInSeconds types.Int64 `tfsdk:"existing_token_expire_in_seconds"` + // The name of the recipient. + Name types.String `tfsdk:"-" url:"-"` +} + +// An object with __properties__ containing map of key-value properties attached +// to the securable. +type SecurablePropertiesKvPairs struct { + // A map of key-value properties attached to the securable. + Properties map[string]types.String `tfsdk:"properties"` +} + +// A map of key-value properties attached to the securable. +type SecurablePropertiesMap map[string]types.String + +type ShareInfo struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // Time at which this share was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at"` + // Username of share creator. + CreatedBy types.String `tfsdk:"created_by"` + // Name of the share. + Name types.String `tfsdk:"name"` + // A list of shared data objects within the share. + Objects []SharedDataObject `tfsdk:"objects"` + // Username of current owner of share. + Owner types.String `tfsdk:"owner"` + // Storage Location URL (full path) for the share. + StorageLocation types.String `tfsdk:"storage_location"` + // Storage root URL for the share. + StorageRoot types.String `tfsdk:"storage_root"` + // Time at which this share was updated, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at"` + // Username of share updater. + UpdatedBy types.String `tfsdk:"updated_by"` +} + +// Get recipient share permissions +type SharePermissionsRequest struct { + // The name of the Recipient. + Name types.String `tfsdk:"-" url:"-"` +} + +type ShareToPrivilegeAssignment struct { + // The privileges assigned to the principal. + PrivilegeAssignments []PrivilegeAssignment `tfsdk:"privilege_assignments"` + // The share name. 
+ ShareName types.String `tfsdk:"share_name"`
+}
+
+type SharedDataObject struct {
+ // The time when this data object is added to the share, in epoch
+ // milliseconds.
+ AddedAt types.Int64 `tfsdk:"added_at"`
+ // Username of the sharer.
+ AddedBy types.String `tfsdk:"added_by"`
+ // Whether to enable cdf or indicate if cdf is enabled on the shared object.
+ CdfEnabled types.Bool `tfsdk:"cdf_enabled"`
+ // A user-provided comment when adding the data object to the share.
+ // [Update:OPT]
+ Comment types.String `tfsdk:"comment"`
+ // The content of the notebook file when the data object type is
+ // NOTEBOOK_FILE. This should be base64 encoded. Required for adding a
+ // NOTEBOOK_FILE, optional for updating, ignored for other types.
+ Content types.String `tfsdk:"content"`
+ // The type of the data object.
+ DataObjectType SharedDataObjectDataObjectType `tfsdk:"data_object_type"`
+ // Whether to enable or disable sharing of data history. If not specified,
+ // the default is **DISABLED**.
+ HistoryDataSharingStatus SharedDataObjectHistoryDataSharingStatus `tfsdk:"history_data_sharing_status"`
+ // A fully qualified name that uniquely identifies a data object.
+ //
+ // For example, a table's fully qualified name is in the format of
+ // `<catalog>.<schema>.<table>`.
+ Name types.String `tfsdk:"name"`
+ // Array of partitions for the shared data.
+ Partitions []Partition `tfsdk:"partitions"`
+ // A user-provided new name for the data object within the share. If this
+ // new name is not provided, the object's original name will be used as the
+ // `shared_as` name. The `shared_as` name must be unique within a share. For
+ // tables, the new name must follow the format of `<schema>.<table>`.
+ SharedAs types.String `tfsdk:"shared_as"`
+ // The start version associated with the object. This allows data providers
+ // to control the lowest object version that is accessible by clients. If
+ // specified, clients can query snapshots or changes for versions >=
+ // start_version. If not specified, clients can only query starting from the
+ // version of the object at the time it was added to the share.
+ //
+ // NOTE: The start_version should be <= the `current` version of the object.
+ StartVersion types.Int64 `tfsdk:"start_version"`
+ // One of: **ACTIVE**, **PERMISSION_DENIED**.
+ Status SharedDataObjectStatus `tfsdk:"status"`
+ // A user-provided new name for the data object within the share. If this
+ // new name is not provided, the object's original name will be used as the
+ // `string_shared_as` name. The `string_shared_as` name must be unique
+ // within a share. For notebooks, the new name should be the new notebook
+ // file name.
+ StringSharedAs types.String `tfsdk:"string_shared_as"`
+}
+
+// The type of the data object.
+type SharedDataObjectDataObjectType string
+
+const SharedDataObjectDataObjectTypeMaterializedView SharedDataObjectDataObjectType = `MATERIALIZED_VIEW`
+
+const SharedDataObjectDataObjectTypeModel SharedDataObjectDataObjectType = `MODEL`
+
+const SharedDataObjectDataObjectTypeNotebookFile SharedDataObjectDataObjectType = `NOTEBOOK_FILE`
+
+const SharedDataObjectDataObjectTypeSchema SharedDataObjectDataObjectType = `SCHEMA`
+
+const SharedDataObjectDataObjectTypeStreamingTable SharedDataObjectDataObjectType = `STREAMING_TABLE`
+
+const SharedDataObjectDataObjectTypeTable SharedDataObjectDataObjectType = `TABLE`
+
+const SharedDataObjectDataObjectTypeView SharedDataObjectDataObjectType = `VIEW`
+
+// String representation for [fmt.Print]
+func (f *SharedDataObjectDataObjectType) String() string {
+ return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *SharedDataObjectDataObjectType) Set(v string) error {
+ switch v {
+ case `MATERIALIZED_VIEW`, `MODEL`, `NOTEBOOK_FILE`, `SCHEMA`, `STREAMING_TABLE`, `TABLE`, `VIEW`:
+ *f = SharedDataObjectDataObjectType(v)
+ return nil
+ default:
+ return fmt.Errorf(`value "%s" is not one of "MATERIALIZED_VIEW", "MODEL", "NOTEBOOK_FILE", "SCHEMA", "STREAMING_TABLE", "TABLE", "VIEW"`, v)
+ }
+}
+
+// Type always returns SharedDataObjectDataObjectType to satisfy [pflag.Value] interface
+func (f *SharedDataObjectDataObjectType) Type() string {
+ return "SharedDataObjectDataObjectType"
+}
+
+// Whether to enable or disable sharing of data history. If not specified, the
+// default is **DISABLED**.
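+//
+// A minimal sketch (illustrative values only) of a shared table entry that
+// turns history sharing on, using the SharedDataObject type defined above:
+//
+//	obj := SharedDataObject{
+//		Name:                     types.StringValue("main.default.events"),
+//		DataObjectType:           SharedDataObjectDataObjectTypeTable,
+//		HistoryDataSharingStatus: SharedDataObjectHistoryDataSharingStatusEnabled,
+//	}
+//	_ = obj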
+type SharedDataObjectHistoryDataSharingStatus string + +const SharedDataObjectHistoryDataSharingStatusDisabled SharedDataObjectHistoryDataSharingStatus = `DISABLED` + +const SharedDataObjectHistoryDataSharingStatusEnabled SharedDataObjectHistoryDataSharingStatus = `ENABLED` + +// String representation for [fmt.Print] +func (f *SharedDataObjectHistoryDataSharingStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharedDataObjectHistoryDataSharingStatus) Set(v string) error { + switch v { + case `DISABLED`, `ENABLED`: + *f = SharedDataObjectHistoryDataSharingStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DISABLED", "ENABLED"`, v) + } +} + +// Type always returns SharedDataObjectHistoryDataSharingStatus to satisfy [pflag.Value] interface +func (f *SharedDataObjectHistoryDataSharingStatus) Type() string { + return "SharedDataObjectHistoryDataSharingStatus" +} + +// One of: **ACTIVE**, **PERMISSION_DENIED**. +type SharedDataObjectStatus string + +const SharedDataObjectStatusActive SharedDataObjectStatus = `ACTIVE` + +const SharedDataObjectStatusPermissionDenied SharedDataObjectStatus = `PERMISSION_DENIED` + +// String representation for [fmt.Print] +func (f *SharedDataObjectStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharedDataObjectStatus) Set(v string) error { + switch v { + case `ACTIVE`, `PERMISSION_DENIED`: + *f = SharedDataObjectStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "PERMISSION_DENIED"`, v) + } +} + +// Type always returns SharedDataObjectStatus to satisfy [pflag.Value] interface +func (f *SharedDataObjectStatus) Type() string { + return "SharedDataObjectStatus" +} + +type SharedDataObjectUpdate struct { + // One of: **ADD**, **REMOVE**, **UPDATE**. + Action SharedDataObjectUpdateAction `tfsdk:"action"` + // The data object that is being added, removed, or updated. + DataObject *SharedDataObject `tfsdk:"data_object"` +} + +// One of: **ADD**, **REMOVE**, **UPDATE**. +type SharedDataObjectUpdateAction string + +const SharedDataObjectUpdateActionAdd SharedDataObjectUpdateAction = `ADD` + +const SharedDataObjectUpdateActionRemove SharedDataObjectUpdateAction = `REMOVE` + +const SharedDataObjectUpdateActionUpdate SharedDataObjectUpdateAction = `UPDATE` + +// String representation for [fmt.Print] +func (f *SharedDataObjectUpdateAction) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharedDataObjectUpdateAction) Set(v string) error { + switch v { + case `ADD`, `REMOVE`, `UPDATE`: + *f = SharedDataObjectUpdateAction(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ADD", "REMOVE", "UPDATE"`, v) + } +} + +// Type always returns SharedDataObjectUpdateAction to satisfy [pflag.Value] interface +func (f *SharedDataObjectUpdateAction) Type() string { + return "SharedDataObjectUpdateAction" +} + +type UpdateCleanRoom struct { + // Array of shared data object updates. + CatalogUpdates []CleanRoomCatalogUpdate `tfsdk:"catalog_updates"` + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // The name of the clean room. + Name types.String `tfsdk:"-" url:"-"` + // Username of current owner of clean room. 
+ Owner types.String `tfsdk:"owner"` +} + +type UpdatePermissionsResponse struct { +} + +type UpdateProvider struct { + // Description about the provider. + Comment types.String `tfsdk:"comment"` + // Name of the provider. + Name types.String `tfsdk:"-" url:"-"` + // New name for the provider. + NewName types.String `tfsdk:"new_name"` + // Username of Provider owner. + Owner types.String `tfsdk:"owner"` + // This field is required when the __authentication_type__ is **TOKEN** or + // not provided. + RecipientProfileStr types.String `tfsdk:"recipient_profile_str"` +} + +type UpdateRecipient struct { + // Description about the recipient. + Comment types.String `tfsdk:"comment"` + // IP Access List + IpAccessList *IpAccessList `tfsdk:"ip_access_list"` + // Name of the recipient. + Name types.String `tfsdk:"-" url:"-"` + // New name for the recipient. + NewName types.String `tfsdk:"new_name"` + // Username of the recipient owner. + Owner types.String `tfsdk:"owner"` + // Recipient properties as map of string key-value pairs. When provided in + // update request, the specified properties will override the existing + // properties. To add and remove properties, one would need to perform a + // read-modify-write. + PropertiesKvpairs *SecurablePropertiesKvPairs `tfsdk:"properties_kvpairs"` +} + +type UpdateResponse struct { +} + +type UpdateShare struct { + // User-provided free-form text description. + Comment types.String `tfsdk:"comment"` + // The name of the share. + Name types.String `tfsdk:"-" url:"-"` + // New name for the share. + NewName types.String `tfsdk:"new_name"` + // Username of current owner of share. + Owner types.String `tfsdk:"owner"` + // Storage root URL for the share. + StorageRoot types.String `tfsdk:"storage_root"` + // Array of shared data object updates. + Updates []SharedDataObjectUpdate `tfsdk:"updates"` +} + +type UpdateSharePermissions struct { + // Array of permission changes. + Changes []catalog.PermissionsChange `tfsdk:"changes"` + // The name of the share. + Name types.String `tfsdk:"-" url:"-"` +} diff --git a/service/sql_tf/model.go b/service/sql_tf/model.go new file mode 100755 index 0000000000..63c20e3db3 --- /dev/null +++ b/service/sql_tf/model.go @@ -0,0 +1,3144 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package sql_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AccessControl struct { + GroupName types.String `tfsdk:"group_name"` + // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * + // `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query + PermissionLevel PermissionLevel `tfsdk:"permission_level"` + + UserName types.String `tfsdk:"user_name"` +} + +type Alert struct { + // Timestamp when the alert was created. + CreatedAt types.String `tfsdk:"created_at"` + // Alert ID. + Id types.String `tfsdk:"id"` + // Timestamp when the alert was last triggered. + LastTriggeredAt types.String `tfsdk:"last_triggered_at"` + // Name of the alert. 
+ Name types.String `tfsdk:"name"` + // Alert configuration options. + Options *AlertOptions `tfsdk:"options"` + // The identifier of the workspace folder containing the object. + Parent types.String `tfsdk:"parent"` + + Query *AlertQuery `tfsdk:"query"` + // Number of seconds after being triggered before the alert rearms itself + // and can be triggered again. If `null`, alert will never be triggered + // again. + Rearm types.Int64 `tfsdk:"rearm"` + // State of the alert. Possible values are: `unknown` (yet to be evaluated), + // `triggered` (evaluated and fulfilled trigger conditions), or `ok` + // (evaluated and did not fulfill trigger conditions). + State AlertState `tfsdk:"state"` + // Timestamp when the alert was last updated. + UpdatedAt types.String `tfsdk:"updated_at"` + + User *User `tfsdk:"user"` +} + +// Alert configuration options. +type AlertOptions struct { + // Name of column in the query result to compare in alert evaluation. + Column types.String `tfsdk:"column"` + // Custom body of alert notification, if it exists. See [here] for custom + // templating instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomBody types.String `tfsdk:"custom_body"` + // Custom subject of alert notification, if it exists. This includes email + // subject, Slack notification header, etc. See [here] for custom templating + // instructions. + // + // [here]: https://docs.databricks.com/sql/user/alerts/index.html + CustomSubject types.String `tfsdk:"custom_subject"` + // State that alert evaluates to when query result is empty. + EmptyResultState AlertOptionsEmptyResultState `tfsdk:"empty_result_state"` + // Whether or not the alert is muted. If an alert is muted, it will not + // notify users and notification destinations when triggered. + Muted types.Bool `tfsdk:"muted"` + // Operator used to compare in alert evaluation: `>`, `>=`, `<`, `<=`, `==`, + // `!=` + Op types.String `tfsdk:"op"` + // Value used to compare in alert evaluation. Supported types include + // strings (eg. 'foobar'), floats (eg. 123.4), and booleans (true). + Value any `tfsdk:"value"` +} + +// State that alert evaluates to when query result is empty. +type AlertOptionsEmptyResultState string + +const AlertOptionsEmptyResultStateOk AlertOptionsEmptyResultState = `ok` + +const AlertOptionsEmptyResultStateTriggered AlertOptionsEmptyResultState = `triggered` + +const AlertOptionsEmptyResultStateUnknown AlertOptionsEmptyResultState = `unknown` + +// String representation for [fmt.Print] +func (f *AlertOptionsEmptyResultState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertOptionsEmptyResultState) Set(v string) error { + switch v { + case `ok`, `triggered`, `unknown`: + *f = AlertOptionsEmptyResultState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ok", "triggered", "unknown"`, v) + } +} + +// Type always returns AlertOptionsEmptyResultState to satisfy [pflag.Value] interface +func (f *AlertOptionsEmptyResultState) Type() string { + return "AlertOptionsEmptyResultState" +} + +type AlertQuery struct { + // The timestamp when this query was created. + CreatedAt types.String `tfsdk:"created_at"` + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. 
[Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + DataSourceId types.String `tfsdk:"data_source_id"` + // General description that conveys additional information about this query + // such as usage notes. + Description types.String `tfsdk:"description"` + // Query ID. + Id types.String `tfsdk:"id"` + // Indicates whether the query is trashed. Trashed queries can't be used in + // dashboards, or appear in search results. If this boolean is `true`, the + // `options` property for this query includes a `moved_to_trash_at` + // timestamp. Trashed queries are permanently deleted after 30 days. + IsArchived types.Bool `tfsdk:"is_archived"` + // Whether the query is a draft. Draft queries only appear in list views for + // their owners. Visualizations from draft queries cannot appear on + // dashboards. + IsDraft types.Bool `tfsdk:"is_draft"` + // Text parameter types are not safe from SQL injection for all types of + // data source. Set this Boolean parameter to `true` if a query either does + // not use any text type parameters or uses a data source type where text + // type parameters are handled safely. + IsSafe types.Bool `tfsdk:"is_safe"` + // The title of this query that appears in list views, widget headings, and + // on the query page. + Name types.String `tfsdk:"name"` + + Options *QueryOptions `tfsdk:"options"` + // The text of the query to be run. + Query types.String `tfsdk:"query"` + + Tags []types.String `tfsdk:"tags"` + // The timestamp at which this query was last updated. + UpdatedAt types.String `tfsdk:"updated_at"` + // The ID of the user who owns the query. + UserId types.Int64 `tfsdk:"user_id"` +} + +// State of the alert. Possible values are: `unknown` (yet to be evaluated), +// `triggered` (evaluated and fulfilled trigger conditions), or `ok` (evaluated +// and did not fulfill trigger conditions). +type AlertState string + +const AlertStateOk AlertState = `ok` + +const AlertStateTriggered AlertState = `triggered` + +const AlertStateUnknown AlertState = `unknown` + +// String representation for [fmt.Print] +func (f *AlertState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertState) Set(v string) error { + switch v { + case `ok`, `triggered`, `unknown`: + *f = AlertState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ok", "triggered", "unknown"`, v) + } +} + +// Type always returns AlertState to satisfy [pflag.Value] interface +func (f *AlertState) Type() string { + return "AlertState" +} + +// Describes metadata for a particular chunk, within a result set; this +// structure is used both within a manifest, and when fetching individual chunk +// data or links. +type BaseChunkInfo struct { + // The number of bytes in the result chunk. This field is not available when + // using `INLINE` disposition. + ByteCount types.Int64 `tfsdk:"byte_count"` + // The position within the sequence of result set chunks. + ChunkIndex types.Int64 `tfsdk:"chunk_index"` + // The number of rows within the result chunk. + RowCount types.Int64 `tfsdk:"row_count"` + // The starting row offset within the result set. + RowOffset types.Int64 `tfsdk:"row_offset"` +} + +// Cancel statement execution +type CancelExecutionRequest struct { + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. 
+ StatementId types.String `tfsdk:"-" url:"-"` +} + +type CancelExecutionResponse struct { +} + +type Channel struct { + DbsqlVersion types.String `tfsdk:"dbsql_version"` + + Name ChannelName `tfsdk:"name"` +} + +// Channel information for the SQL warehouse at the time of query execution +type ChannelInfo struct { + // DBSQL Version the channel is mapped to + DbsqlVersion types.String `tfsdk:"dbsql_version"` + // Name of the channel + Name ChannelName `tfsdk:"name"` +} + +// Name of the channel +type ChannelName string + +const ChannelNameChannelNameCurrent ChannelName = `CHANNEL_NAME_CURRENT` + +const ChannelNameChannelNameCustom ChannelName = `CHANNEL_NAME_CUSTOM` + +const ChannelNameChannelNamePreview ChannelName = `CHANNEL_NAME_PREVIEW` + +const ChannelNameChannelNamePrevious ChannelName = `CHANNEL_NAME_PREVIOUS` + +const ChannelNameChannelNameUnspecified ChannelName = `CHANNEL_NAME_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *ChannelName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ChannelName) Set(v string) error { + switch v { + case `CHANNEL_NAME_CURRENT`, `CHANNEL_NAME_CUSTOM`, `CHANNEL_NAME_PREVIEW`, `CHANNEL_NAME_PREVIOUS`, `CHANNEL_NAME_UNSPECIFIED`: + *f = ChannelName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CHANNEL_NAME_CURRENT", "CHANNEL_NAME_CUSTOM", "CHANNEL_NAME_PREVIEW", "CHANNEL_NAME_PREVIOUS", "CHANNEL_NAME_UNSPECIFIED"`, v) + } +} + +// Type always returns ChannelName to satisfy [pflag.Value] interface +func (f *ChannelName) Type() string { + return "ChannelName" +} + +type ColumnInfo struct { + // The name of the column. + Name types.String `tfsdk:"name"` + // The ordinal position of the column (starting at position 0). + Position types.Int64 `tfsdk:"position"` + // The format of the interval type. + TypeIntervalType types.String `tfsdk:"type_interval_type"` + // The name of the base data type. This doesn't include details for complex + // types such as STRUCT, MAP or ARRAY. + TypeName ColumnInfoTypeName `tfsdk:"type_name"` + // Specifies the number of digits in a number. This applies to the DECIMAL + // type. + TypePrecision types.Int64 `tfsdk:"type_precision"` + // Specifies the number of digits to the right of the decimal point in a + // number. This applies to the DECIMAL type. + TypeScale types.Int64 `tfsdk:"type_scale"` + // The full SQL type specification. + TypeText types.String `tfsdk:"type_text"` +} + +// The name of the base data type. This doesn't include details for complex +// types such as STRUCT, MAP or ARRAY. 
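+//
+// Like the other generated enums, this type implements [pflag.Value] through
+// its String/Set/Type methods below. A minimal sketch of validating a raw
+// value (the input string here is an arbitrary example):
+//
+//	var tn ColumnInfoTypeName
+//	if err := tn.Set("DECIMAL"); err != nil {
+//		// values outside the allowed list are rejected with a descriptive error
+//	}
+//	_ = tn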
+type ColumnInfoTypeName string + +const ColumnInfoTypeNameArray ColumnInfoTypeName = `ARRAY` + +const ColumnInfoTypeNameBinary ColumnInfoTypeName = `BINARY` + +const ColumnInfoTypeNameBoolean ColumnInfoTypeName = `BOOLEAN` + +const ColumnInfoTypeNameByte ColumnInfoTypeName = `BYTE` + +const ColumnInfoTypeNameChar ColumnInfoTypeName = `CHAR` + +const ColumnInfoTypeNameDate ColumnInfoTypeName = `DATE` + +const ColumnInfoTypeNameDecimal ColumnInfoTypeName = `DECIMAL` + +const ColumnInfoTypeNameDouble ColumnInfoTypeName = `DOUBLE` + +const ColumnInfoTypeNameFloat ColumnInfoTypeName = `FLOAT` + +const ColumnInfoTypeNameInt ColumnInfoTypeName = `INT` + +const ColumnInfoTypeNameInterval ColumnInfoTypeName = `INTERVAL` + +const ColumnInfoTypeNameLong ColumnInfoTypeName = `LONG` + +const ColumnInfoTypeNameMap ColumnInfoTypeName = `MAP` + +const ColumnInfoTypeNameNull ColumnInfoTypeName = `NULL` + +const ColumnInfoTypeNameShort ColumnInfoTypeName = `SHORT` + +const ColumnInfoTypeNameString ColumnInfoTypeName = `STRING` + +const ColumnInfoTypeNameStruct ColumnInfoTypeName = `STRUCT` + +const ColumnInfoTypeNameTimestamp ColumnInfoTypeName = `TIMESTAMP` + +const ColumnInfoTypeNameUserDefinedType ColumnInfoTypeName = `USER_DEFINED_TYPE` + +// String representation for [fmt.Print] +func (f *ColumnInfoTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnInfoTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TIMESTAMP`, `USER_DEFINED_TYPE`: + *f = ColumnInfoTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TIMESTAMP", "USER_DEFINED_TYPE"`, v) + } +} + +// Type always returns ColumnInfoTypeName to satisfy [pflag.Value] interface +func (f *ColumnInfoTypeName) Type() string { + return "ColumnInfoTypeName" +} + +type CreateAlert struct { + // Name of the alert. + Name types.String `tfsdk:"name"` + // Alert configuration options. + Options AlertOptions `tfsdk:"options"` + // The identifier of the workspace folder containing the object. + Parent types.String `tfsdk:"parent"` + // Query ID. + QueryId types.String `tfsdk:"query_id"` + // Number of seconds after being triggered before the alert rearms itself + // and can be triggered again. If `null`, alert will never be triggered + // again. + Rearm types.Int64 `tfsdk:"rearm"` +} + +// Add visualization to a query +type CreateQueryVisualizationRequest struct { + // A short description of this visualization. This is not displayed in the + // UI. + Description types.String `tfsdk:"description"` + // The name of the visualization that appears on dashboards and the query + // screen. + Name types.String `tfsdk:"name"` + // The options object varies widely from one visualization type to the next + // and is unsupported. Databricks does not recommend modifying visualization + // settings in JSON. + Options any `tfsdk:"options"` + // The identifier returned by :method:queries/create + QueryId types.String `tfsdk:"query_id"` + // The type of visualization: chart, table, pivot table, and so on. 
+ Type types.String `tfsdk:"type"` +} + +type CreateWarehouseRequest struct { + // The amount of time in minutes that a SQL warehouse must be idle (i.e., no + // RUNNING queries) before it is automatically stopped. + // + // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + // + // Defaults to 120 mins + AutoStopMins types.Int64 `tfsdk:"auto_stop_mins"` + // Channel Details + Channel *Channel `tfsdk:"channel"` + // Size of the clusters allocated for this warehouse. Increasing the size of + // a spark cluster allows you to run larger queries on it. If you want to + // increase the number of concurrent queries, please tune max_num_clusters. + // + // Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large + // - 2X-Large - 3X-Large - 4X-Large + ClusterSize types.String `tfsdk:"cluster_size"` + // warehouse creator name + CreatorName types.String `tfsdk:"creator_name"` + // Configures whether the warehouse should use Photon optimized clusters. + // + // Defaults to false. + EnablePhoton types.Bool `tfsdk:"enable_photon"` + // Configures whether the warehouse should use serverless compute + EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute"` + // Deprecated. Instance profile used to pass IAM role to the cluster + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // Maximum number of clusters that the autoscaler will create to handle + // concurrent queries. + // + // Supported values: - Must be >= min_num_clusters - Must be <= 30. + // + // Defaults to min_clusters if unset. + MaxNumClusters types.Int64 `tfsdk:"max_num_clusters"` + // Minimum number of available clusters that will be maintained for this SQL + // warehouse. Increasing this will ensure that a larger number of clusters + // are always running and therefore may reduce the cold start time for new + // queries. This is similar to reserved vs. revocable cores in a resource + // manager. + // + // Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) + // + // Defaults to 1 + MinNumClusters types.Int64 `tfsdk:"min_num_clusters"` + // Logical name for the cluster. + // + // Supported values: - Must be unique within an org. - Must be less than 100 + // characters. + Name types.String `tfsdk:"name"` + // Configurations whether the warehouse should use spot instances. + SpotInstancePolicy SpotInstancePolicy `tfsdk:"spot_instance_policy"` + // A set of key-value pairs that will be tagged on all resources (e.g., AWS + // instances and EBS volumes) associated with this SQL warehouse. + // + // Supported values: - Number of tags < 45. + Tags *EndpointTags `tfsdk:"tags"` + // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless + // compute, you must set to `PRO` and also set the field + // `enable_serverless_compute` to `true`. + WarehouseType CreateWarehouseRequestWarehouseType `tfsdk:"warehouse_type"` +} + +// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, +// you must set to `PRO` and also set the field `enable_serverless_compute` to +// `true`. 
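+//
+// A minimal sketch (name and size are illustrative) of a CreateWarehouseRequest
+// that opts in to serverless compute, which per the comment above requires the
+// `PRO` warehouse type; `types.StringValue`/`types.BoolValue` are assumed to be
+// the terraform-plugin-framework constructors:
+//
+//	req := CreateWarehouseRequest{
+//		Name:                    types.StringValue("example-warehouse"),
+//		ClusterSize:             types.StringValue("2X-Small"),
+//		EnableServerlessCompute: types.BoolValue(true),
+//		WarehouseType:           CreateWarehouseRequestWarehouseTypePro,
+//	}
+//	_ = req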
+type CreateWarehouseRequestWarehouseType string + +const CreateWarehouseRequestWarehouseTypeClassic CreateWarehouseRequestWarehouseType = `CLASSIC` + +const CreateWarehouseRequestWarehouseTypePro CreateWarehouseRequestWarehouseType = `PRO` + +const CreateWarehouseRequestWarehouseTypeTypeUnspecified CreateWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *CreateWarehouseRequestWarehouseType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CreateWarehouseRequestWarehouseType) Set(v string) error { + switch v { + case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: + *f = CreateWarehouseRequestWarehouseType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns CreateWarehouseRequestWarehouseType to satisfy [pflag.Value] interface +func (f *CreateWarehouseRequestWarehouseType) Type() string { + return "CreateWarehouseRequestWarehouseType" +} + +type CreateWarehouseResponse struct { + // Id for the SQL warehouse. This value is unique across all SQL warehouses. + Id types.String `tfsdk:"id"` +} + +type CreateWidget struct { + // Dashboard ID returned by :method:dashboards/create. + DashboardId types.String `tfsdk:"dashboard_id"` + // Widget ID returned by :method:dashboardwidgets/create + Id types.String `tfsdk:"-" url:"-"` + + Options WidgetOptions `tfsdk:"options"` + // If this is a textbox widget, the application displays this text. This + // field is ignored if the widget contains a visualization in the + // `visualization` field. + Text types.String `tfsdk:"text"` + // Query Vizualization ID returned by :method:queryvisualizations/create. + VisualizationId types.String `tfsdk:"visualization_id"` + // Width of a widget + Width types.Int64 `tfsdk:"width"` +} + +// A JSON representing a dashboard containing widgets of visualizations and text +// boxes. +type Dashboard struct { + // Whether the authenticated user can edit the query definition. + CanEdit types.Bool `tfsdk:"can_edit"` + // Timestamp when this dashboard was created. + CreatedAt types.String `tfsdk:"created_at"` + // In the web application, query filters that share a name are coupled to a + // single selection box if this value is `true`. + DashboardFiltersEnabled types.Bool `tfsdk:"dashboard_filters_enabled"` + // The ID for this dashboard. + Id types.String `tfsdk:"id"` + // Indicates whether a dashboard is trashed. Trashed dashboards won't appear + // in list views. If this boolean is `true`, the `options` property for this + // dashboard includes a `moved_to_trash_at` timestamp. Items in trash are + // permanently deleted after 30 days. + IsArchived types.Bool `tfsdk:"is_archived"` + // Whether a dashboard is a draft. Draft dashboards only appear in list + // views for their owners. + IsDraft types.Bool `tfsdk:"is_draft"` + // Indicates whether this query object appears in the current user's + // favorites list. This flag determines whether the star icon for favorites + // is selected. + IsFavorite types.Bool `tfsdk:"is_favorite"` + // The title of the dashboard that appears in list views and at the top of + // the dashboard page. + Name types.String `tfsdk:"name"` + + Options *DashboardOptions `tfsdk:"options"` + // The identifier of the workspace folder containing the object. 
+ Parent types.String `tfsdk:"parent"` + // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * + // `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query + PermissionTier PermissionLevel `tfsdk:"permission_tier"` + // URL slug. Usually mirrors the query name with dashes (`-`) instead of + // spaces. Appears in the URL for this query. + Slug types.String `tfsdk:"slug"` + + Tags []types.String `tfsdk:"tags"` + // Timestamp when this dashboard was last updated. + UpdatedAt types.String `tfsdk:"updated_at"` + + User *User `tfsdk:"user"` + // The ID of the user who owns the dashboard. + UserId types.Int64 `tfsdk:"user_id"` + + Widgets []Widget `tfsdk:"widgets"` +} + +type DashboardEditContent struct { + DashboardId types.String `tfsdk:"-" url:"-"` + // The title of this dashboard that appears in list views and at the top of + // the dashboard page. + Name types.String `tfsdk:"name"` + // Sets the **Run as** role for the object. Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `tfsdk:"run_as_role"` + + Tags []types.String `tfsdk:"tags"` +} + +type DashboardOptions struct { + // The timestamp when this dashboard was moved to trash. Only present when + // the `is_archived` property is `true`. Trashed items are deleted after + // thirty days. + MovedToTrashAt types.String `tfsdk:"moved_to_trash_at"` +} + +type DashboardPostContent struct { + // Indicates whether the dashboard filters are enabled + DashboardFiltersEnabled types.Bool `tfsdk:"dashboard_filters_enabled"` + // Indicates whether this dashboard object should appear in the current + // user's favorites list. + IsFavorite types.Bool `tfsdk:"is_favorite"` + // The title of this dashboard that appears in list views and at the top of + // the dashboard page. + Name types.String `tfsdk:"name"` + // The identifier of the workspace folder containing the object. + Parent types.String `tfsdk:"parent"` + // Sets the **Run as** role for the object. Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `tfsdk:"run_as_role"` + + Tags []types.String `tfsdk:"tags"` +} + +// A JSON object representing a DBSQL data source / SQL warehouse. +type DataSource struct { + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. [Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + Id types.String `tfsdk:"id"` + // The string name of this data source / SQL warehouse as it appears in the + // Databricks SQL web application. + Name types.String `tfsdk:"name"` + // Reserved for internal use. + PauseReason types.String `tfsdk:"pause_reason"` + // Reserved for internal use. + Paused types.Int64 `tfsdk:"paused"` + // Reserved for internal use. + SupportsAutoLimit types.Bool `tfsdk:"supports_auto_limit"` + // Reserved for internal use. + Syntax types.String `tfsdk:"syntax"` + // The type of data source. For SQL warehouses, this will be + // `databricks_internal`. + Type types.String `tfsdk:"type"` + // Reserved for internal use. + ViewOnly types.Bool `tfsdk:"view_only"` + // The ID of the associated SQL warehouse, if this data source is backed by + // a SQL warehouse. 
+ WarehouseId types.String `tfsdk:"warehouse_id"` +} + +// Delete an alert +type DeleteAlertRequest struct { + AlertId types.String `tfsdk:"-" url:"-"` +} + +// Remove a dashboard +type DeleteDashboardRequest struct { + DashboardId types.String `tfsdk:"-" url:"-"` +} + +// Remove widget +type DeleteDashboardWidgetRequest struct { + // Widget ID returned by :method:dashboardwidgets/create + Id types.String `tfsdk:"-" url:"-"` +} + +// Delete a query +type DeleteQueryRequest struct { + QueryId types.String `tfsdk:"-" url:"-"` +} + +// Remove visualization +type DeleteQueryVisualizationRequest struct { + // Widget ID returned by :method:queryvizualisations/create + Id types.String `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +// Delete a warehouse +type DeleteWarehouseRequest struct { + // Required. Id of the SQL warehouse. + Id types.String `tfsdk:"-" url:"-"` +} + +type DeleteWarehouseResponse struct { +} + +// The fetch disposition provides two modes of fetching results: `INLINE` and +// `EXTERNAL_LINKS`. +// +// Statements executed with `INLINE` disposition will return result data inline, +// in `JSON_ARRAY` format, in a series of chunks. If a given statement produces +// a result set with a size larger than 25 MiB, that statement execution is +// aborted, and no result set will be available. +// +// **NOTE** Byte limits are computed based upon internal representations of the +// result set data, and might not match the sizes visible in JSON responses. +// +// Statements executed with `EXTERNAL_LINKS` disposition will return result data +// as external links: URLs that point to cloud storage internal to the +// workspace. Using `EXTERNAL_LINKS` disposition allows statements to generate +// arbitrarily sized result sets for fetching up to 100 GiB. The resulting links +// have two important properties: +// +// 1. They point to resources _external_ to the Databricks compute; therefore +// any associated authentication information (typically a personal access token, +// OAuth token, or similar) _must be removed_ when fetching from these links. +// +// 2. These are presigned URLs with a specific expiration, indicated in the +// response. The behavior when attempting to use an expired link is cloud +// specific. +type Disposition string + +const DispositionExternalLinks Disposition = `EXTERNAL_LINKS` + +const DispositionInline Disposition = `INLINE` + +// String representation for [fmt.Print] +func (f *Disposition) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Disposition) Set(v string) error { + switch v { + case `EXTERNAL_LINKS`, `INLINE`: + *f = Disposition(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EXTERNAL_LINKS", "INLINE"`, v) + } +} + +// Type always returns Disposition to satisfy [pflag.Value] interface +func (f *Disposition) Type() string { + return "Disposition" +} + +type EditAlert struct { + AlertId types.String `tfsdk:"-" url:"-"` + // Name of the alert. + Name types.String `tfsdk:"name"` + // Alert configuration options. + Options AlertOptions `tfsdk:"options"` + // Query ID. + QueryId types.String `tfsdk:"query_id"` + // Number of seconds after being triggered before the alert rearms itself + // and can be triggered again. If `null`, alert will never be triggered + // again. 
+ Rearm types.Int64 `tfsdk:"rearm"` +} + +type EditWarehouseRequest struct { + // The amount of time in minutes that a SQL warehouse must be idle (i.e., no + // RUNNING queries) before it is automatically stopped. + // + // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + // + // Defaults to 120 mins + AutoStopMins types.Int64 `tfsdk:"auto_stop_mins"` + // Channel Details + Channel *Channel `tfsdk:"channel"` + // Size of the clusters allocated for this warehouse. Increasing the size of + // a spark cluster allows you to run larger queries on it. If you want to + // increase the number of concurrent queries, please tune max_num_clusters. + // + // Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large + // - 2X-Large - 3X-Large - 4X-Large + ClusterSize types.String `tfsdk:"cluster_size"` + // warehouse creator name + CreatorName types.String `tfsdk:"creator_name"` + // Configures whether the warehouse should use Photon optimized clusters. + // + // Defaults to false. + EnablePhoton types.Bool `tfsdk:"enable_photon"` + // Configures whether the warehouse should use serverless compute. + EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute"` + // Required. Id of the warehouse to configure. + Id types.String `tfsdk:"-" url:"-"` + // Deprecated. Instance profile used to pass IAM role to the cluster + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // Maximum number of clusters that the autoscaler will create to handle + // concurrent queries. + // + // Supported values: - Must be >= min_num_clusters - Must be <= 30. + // + // Defaults to min_clusters if unset. + MaxNumClusters types.Int64 `tfsdk:"max_num_clusters"` + // Minimum number of available clusters that will be maintained for this SQL + // warehouse. Increasing this will ensure that a larger number of clusters + // are always running and therefore may reduce the cold start time for new + // queries. This is similar to reserved vs. revocable cores in a resource + // manager. + // + // Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) + // + // Defaults to 1 + MinNumClusters types.Int64 `tfsdk:"min_num_clusters"` + // Logical name for the cluster. + // + // Supported values: - Must be unique within an org. - Must be less than 100 + // characters. + Name types.String `tfsdk:"name"` + // Configurations whether the warehouse should use spot instances. + SpotInstancePolicy SpotInstancePolicy `tfsdk:"spot_instance_policy"` + // A set of key-value pairs that will be tagged on all resources (e.g., AWS + // instances and EBS volumes) associated with this SQL warehouse. + // + // Supported values: - Number of tags < 45. + Tags *EndpointTags `tfsdk:"tags"` + // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless + // compute, you must set to `PRO` and also set the field + // `enable_serverless_compute` to `true`. + WarehouseType EditWarehouseRequestWarehouseType `tfsdk:"warehouse_type"` +} + +// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, +// you must set to `PRO` and also set the field `enable_serverless_compute` to +// `true`. 
+type EditWarehouseRequestWarehouseType string + +const EditWarehouseRequestWarehouseTypeClassic EditWarehouseRequestWarehouseType = `CLASSIC` + +const EditWarehouseRequestWarehouseTypePro EditWarehouseRequestWarehouseType = `PRO` + +const EditWarehouseRequestWarehouseTypeTypeUnspecified EditWarehouseRequestWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *EditWarehouseRequestWarehouseType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EditWarehouseRequestWarehouseType) Set(v string) error { + switch v { + case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: + *f = EditWarehouseRequestWarehouseType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns EditWarehouseRequestWarehouseType to satisfy [pflag.Value] interface +func (f *EditWarehouseRequestWarehouseType) Type() string { + return "EditWarehouseRequestWarehouseType" +} + +type EditWarehouseResponse struct { +} + +type EndpointConfPair struct { + Key types.String `tfsdk:"key"` + + Value types.String `tfsdk:"value"` +} + +type EndpointHealth struct { + // Details about errors that are causing current degraded/failed status. + Details types.String `tfsdk:"details"` + // The reason for failure to bring up clusters for this warehouse. This is + // available when status is 'FAILED' and sometimes when it is DEGRADED. + FailureReason *TerminationReason `tfsdk:"failure_reason"` + // Deprecated. split into summary and details for security + Message types.String `tfsdk:"message"` + // Health status of the warehouse. + Status Status `tfsdk:"status"` + // A short summary of the health status in case of degraded/failed + // warehouses. + Summary types.String `tfsdk:"summary"` +} + +type EndpointInfo struct { + // The amount of time in minutes that a SQL warehouse must be idle (i.e., no + // RUNNING queries) before it is automatically stopped. + // + // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + // + // Defaults to 120 mins + AutoStopMins types.Int64 `tfsdk:"auto_stop_mins"` + // Channel Details + Channel *Channel `tfsdk:"channel"` + // Size of the clusters allocated for this warehouse. Increasing the size of + // a spark cluster allows you to run larger queries on it. If you want to + // increase the number of concurrent queries, please tune max_num_clusters. + // + // Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large + // - 2X-Large - 3X-Large - 4X-Large + ClusterSize types.String `tfsdk:"cluster_size"` + // warehouse creator name + CreatorName types.String `tfsdk:"creator_name"` + // Configures whether the warehouse should use Photon optimized clusters. + // + // Defaults to false. + EnablePhoton types.Bool `tfsdk:"enable_photon"` + // Configures whether the warehouse should use serverless compute + EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute"` + // Optional health status. Assume the warehouse is healthy if this field is + // not set. + Health *EndpointHealth `tfsdk:"health"` + // unique identifier for warehouse + Id types.String `tfsdk:"id"` + // Deprecated. Instance profile used to pass IAM role to the cluster + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // the jdbc connection string for this warehouse + JdbcUrl types.String `tfsdk:"jdbc_url"` + // Maximum number of clusters that the autoscaler will create to handle + // concurrent queries. 
+ // + // Supported values: - Must be >= min_num_clusters - Must be <= 30. + // + // Defaults to min_clusters if unset. + MaxNumClusters types.Int64 `tfsdk:"max_num_clusters"` + // Minimum number of available clusters that will be maintained for this SQL + // warehouse. Increasing this will ensure that a larger number of clusters + // are always running and therefore may reduce the cold start time for new + // queries. This is similar to reserved vs. revocable cores in a resource + // manager. + // + // Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) + // + // Defaults to 1 + MinNumClusters types.Int64 `tfsdk:"min_num_clusters"` + // Logical name for the cluster. + // + // Supported values: - Must be unique within an org. - Must be less than 100 + // characters. + Name types.String `tfsdk:"name"` + // current number of active sessions for the warehouse + NumActiveSessions types.Int64 `tfsdk:"num_active_sessions"` + // current number of clusters running for the service + NumClusters types.Int64 `tfsdk:"num_clusters"` + // ODBC parameters for the SQL warehouse + OdbcParams *OdbcParams `tfsdk:"odbc_params"` + // Configurations whether the warehouse should use spot instances. + SpotInstancePolicy SpotInstancePolicy `tfsdk:"spot_instance_policy"` + // State of the warehouse + State State `tfsdk:"state"` + // A set of key-value pairs that will be tagged on all resources (e.g., AWS + // instances and EBS volumes) associated with this SQL warehouse. + // + // Supported values: - Number of tags < 45. + Tags *EndpointTags `tfsdk:"tags"` + // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless + // compute, you must set to `PRO` and also set the field + // `enable_serverless_compute` to `true`. + WarehouseType EndpointInfoWarehouseType `tfsdk:"warehouse_type"` +} + +// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, +// you must set to `PRO` and also set the field `enable_serverless_compute` to +// `true`. +type EndpointInfoWarehouseType string + +const EndpointInfoWarehouseTypeClassic EndpointInfoWarehouseType = `CLASSIC` + +const EndpointInfoWarehouseTypePro EndpointInfoWarehouseType = `PRO` + +const EndpointInfoWarehouseTypeTypeUnspecified EndpointInfoWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *EndpointInfoWarehouseType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointInfoWarehouseType) Set(v string) error { + switch v { + case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: + *f = EndpointInfoWarehouseType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns EndpointInfoWarehouseType to satisfy [pflag.Value] interface +func (f *EndpointInfoWarehouseType) Type() string { + return "EndpointInfoWarehouseType" +} + +type EndpointTagPair struct { + Key types.String `tfsdk:"key"` + + Value types.String `tfsdk:"value"` +} + +type EndpointTags struct { + CustomTags []EndpointTagPair `tfsdk:"custom_tags"` +} + +type ExecuteStatementRequest struct { + // Applies the given byte limit to the statement's result size. Byte counts + // are based on internal data representations and might not match the final + // size in the requested `format`. If the result was truncated due to the + // byte limit, then `truncated` in the response is set to `true`. 
When using + // `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is + // applied if `byte_limit` is not explcitly set. + ByteLimit types.Int64 `tfsdk:"byte_limit"` + // Sets default catalog for statement execution, similar to [`USE CATALOG`] + // in SQL. + // + // [`USE CATALOG`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html + Catalog types.String `tfsdk:"catalog"` + // The fetch disposition provides two modes of fetching results: `INLINE` + // and `EXTERNAL_LINKS`. + // + // Statements executed with `INLINE` disposition will return result data + // inline, in `JSON_ARRAY` format, in a series of chunks. If a given + // statement produces a result set with a size larger than 25 MiB, that + // statement execution is aborted, and no result set will be available. + // + // **NOTE** Byte limits are computed based upon internal representations of + // the result set data, and might not match the sizes visible in JSON + // responses. + // + // Statements executed with `EXTERNAL_LINKS` disposition will return result + // data as external links: URLs that point to cloud storage internal to the + // workspace. Using `EXTERNAL_LINKS` disposition allows statements to + // generate arbitrarily sized result sets for fetching up to 100 GiB. The + // resulting links have two important properties: + // + // 1. They point to resources _external_ to the Databricks compute; + // therefore any associated authentication information (typically a personal + // access token, OAuth token, or similar) _must be removed_ when fetching + // from these links. + // + // 2. These are presigned URLs with a specific expiration, indicated in the + // response. The behavior when attempting to use an expired link is cloud + // specific. + Disposition Disposition `tfsdk:"disposition"` + // Statement execution supports three result formats: `JSON_ARRAY` + // (default), `ARROW_STREAM`, and `CSV`. + // + // Important: The formats `ARROW_STREAM` and `CSV` are supported only with + // `EXTERNAL_LINKS` disposition. `JSON_ARRAY` is supported in `INLINE` and + // `EXTERNAL_LINKS` disposition. + // + // When specifying `format=JSON_ARRAY`, result data will be formatted as an + // array of arrays of values, where each value is either the *string + // representation* of a value, or `null`. For example, the output of `SELECT + // concat('id-', id) AS strCol, id AS intCol, null AS nullCol FROM range(3)` + // would look like this: + // + // ``` [ [ "id-1", "1", null ], [ "id-2", "2", null ], [ "id-3", "3", null + // ], ] ``` + // + // When specifying `format=JSON_ARRAY` and `disposition=EXTERNAL_LINKS`, + // each chunk in the result contains compact JSON with no indentation or + // extra whitespace. + // + // When specifying `format=ARROW_STREAM` and `disposition=EXTERNAL_LINKS`, + // each chunk in the result will be formatted as Apache Arrow Stream. See + // the [Apache Arrow streaming format]. + // + // When specifying `format=CSV` and `disposition=EXTERNAL_LINKS`, each chunk + // in the result will be a CSV according to [RFC 4180] standard. All the + // columns values will have *string representation* similar to the + // `JSON_ARRAY` format, and `null` values will be encoded as “null”. + // Only the first chunk in the result would contain a header row with column + // names. 
For example, the output of `SELECT concat('id-', id) AS strCol, id + // AS intCol, null as nullCol FROM range(3)` would look like this: + // + // ``` strCol,intCol,nullCol id-1,1,null id-2,2,null id-3,3,null ``` + // + // [Apache Arrow streaming format]: https://arrow.apache.org/docs/format/Columnar.html#ipc-streaming-format + // [RFC 4180]: https://www.rfc-editor.org/rfc/rfc4180 + Format Format `tfsdk:"format"` + // When `wait_timeout > 0s`, the call will block up to the specified time. + // If the statement execution doesn't finish within this time, + // `on_wait_timeout` determines whether the execution should continue or be + // canceled. When set to `CONTINUE`, the statement execution continues + // asynchronously and the call returns a statement ID which can be used for + // polling with :method:statementexecution/getStatement. When set to + // `CANCEL`, the statement execution is canceled and the call returns with a + // `CANCELED` state. + OnWaitTimeout ExecuteStatementRequestOnWaitTimeout `tfsdk:"on_wait_timeout"` + // A list of parameters to pass into a SQL statement containing parameter + // markers. A parameter consists of a name, a value, and optionally a type. + // To represent a NULL value, the `value` field may be omitted or set to + // `null` explicitly. If the `type` field is omitted, the value is + // interpreted as a string. + // + // If the type is given, parameters will be checked for type correctness + // according to the given type. A value is correct if the provided string + // can be converted to the requested type using the `cast` function. The + // exact semantics are described in the section [`cast` function] of the SQL + // language reference. + // + // For example, the following statement contains two parameters, `my_name` + // and `my_date`: + // + // SELECT * FROM my_table WHERE name = :my_name AND date = :my_date + // + // The parameters can be passed in the request body as follows: + // + // { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND + // date = :my_date", "parameters": [ { "name": "my_name", "value": "the + // name" }, { "name": "my_date", "value": "2020-01-01", "type": "DATE" } ] } + // + // Currently, positional parameters denoted by a `?` marker are not + // supported by the Databricks SQL Statement Execution API. + // + // Also see the section [Parameter markers] of the SQL language reference. + // + // [Parameter markers]: https://docs.databricks.com/sql/language-manual/sql-ref-parameter-marker.html + // [`cast` function]: https://docs.databricks.com/sql/language-manual/functions/cast.html + Parameters []StatementParameterListItem `tfsdk:"parameters"` + // Applies the given row limit to the statement's result set, but unlike the + // `LIMIT` clause in SQL, it also sets the `truncated` field in the response + // to indicate whether the result was trimmed due to the limit or not. + RowLimit types.Int64 `tfsdk:"row_limit"` + // Sets default schema for statement execution, similar to [`USE SCHEMA`] in + // SQL. + // + // [`USE SCHEMA`]: https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html + Schema types.String `tfsdk:"schema"` + // The SQL statement to execute. The statement can optionally be + // parameterized, see `parameters`. + Statement types.String `tfsdk:"statement"` + // The time in seconds the call will wait for the statement's result set as + // `Ns`, where `N` can be set to 0 or to a value between 5 and 50. 
+ // + // When set to `0s`, the statement will execute in asynchronous mode and the + // call will not wait for the execution to finish. In this case, the call + // returns directly with `PENDING` state and a statement ID which can be + // used for polling with :method:statementexecution/getStatement. + // + // When set between 5 and 50 seconds, the call will behave synchronously up + // to this timeout and wait for the statement execution to finish. If the + // execution finishes within this time, the call returns immediately with a + // manifest and result data (or a `FAILED` state in case of an execution + // error). If the statement takes longer to execute, `on_wait_timeout` + // determines what should happen after the timeout is reached. + WaitTimeout types.String `tfsdk:"wait_timeout"` + // Warehouse upon which to execute a statement. See also [What are SQL + // warehouses?] + // + // [What are SQL warehouses?]: https://docs.databricks.com/sql/admin/warehouse-type.html + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +// When `wait_timeout > 0s`, the call will block up to the specified time. If +// the statement execution doesn't finish within this time, `on_wait_timeout` +// determines whether the execution should continue or be canceled. When set to +// `CONTINUE`, the statement execution continues asynchronously and the call +// returns a statement ID which can be used for polling with +// :method:statementexecution/getStatement. When set to `CANCEL`, the statement +// execution is canceled and the call returns with a `CANCELED` state. +type ExecuteStatementRequestOnWaitTimeout string + +const ExecuteStatementRequestOnWaitTimeoutCancel ExecuteStatementRequestOnWaitTimeout = `CANCEL` + +const ExecuteStatementRequestOnWaitTimeoutContinue ExecuteStatementRequestOnWaitTimeout = `CONTINUE` + +// String representation for [fmt.Print] +func (f *ExecuteStatementRequestOnWaitTimeout) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExecuteStatementRequestOnWaitTimeout) Set(v string) error { + switch v { + case `CANCEL`, `CONTINUE`: + *f = ExecuteStatementRequestOnWaitTimeout(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCEL", "CONTINUE"`, v) + } +} + +// Type always returns ExecuteStatementRequestOnWaitTimeout to satisfy [pflag.Value] interface +func (f *ExecuteStatementRequestOnWaitTimeout) Type() string { + return "ExecuteStatementRequestOnWaitTimeout" +} + +type ExecuteStatementResponse struct { + // The result manifest provides schema and metadata for the result set. + Manifest *ResultManifest `tfsdk:"manifest"` + // Contains the result data of a single chunk when using `INLINE` + // disposition. When using `EXTERNAL_LINKS` disposition, the array + // `external_links` is used instead to provide presigned URLs to the result + // data in cloud storage. Exactly one of these alternatives is used. (While + // the `external_links` array prepares the API to return multiple links in a + // single response. Currently only a single link is returned.) + Result *ResultData `tfsdk:"result"` + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. + StatementId types.String `tfsdk:"statement_id"` + // The status response includes execution state and if relevant, error + // information. + Status *StatementStatus `tfsdk:"status"` +} + +type ExternalLink struct { + // The number of bytes in the result chunk. 
This field is not available when + // using `INLINE` disposition. + ByteCount types.Int64 `tfsdk:"byte_count"` + // The position within the sequence of result set chunks. + ChunkIndex types.Int64 `tfsdk:"chunk_index"` + // Indicates the date-time that the given external link will expire and + // becomes invalid, after which point a new `external_link` must be + // requested. + Expiration types.String `tfsdk:"expiration"` + // A presigned URL pointing to a chunk of result data, hosted by an external + // service, with a short expiration time (<= 15 minutes). As this URL + // contains a temporary credential, it should be considered sensitive and + // the client should not expose this URL in a log. + ExternalLink types.String `tfsdk:"external_link"` + // HTTP headers that must be included with a GET request to the + // `external_link`. Each header is provided as a key-value pair. Headers are + // typically used to pass a decryption key to the external service. The + // values of these headers should be considered sensitive and the client + // should not expose these values in a log. + HttpHeaders map[string]types.String `tfsdk:"http_headers"` + // When fetching, provides the `chunk_index` for the _next_ chunk. If + // absent, indicates there are no more chunks. The next chunk can be fetched + // with a :method:statementexecution/getStatementResultChunkN request. + NextChunkIndex types.Int64 `tfsdk:"next_chunk_index"` + // When fetching, provides a link to fetch the _next_ chunk. If absent, + // indicates there are no more chunks. This link is an absolute `path` to be + // joined with your `$DATABRICKS_HOST`, and should be treated as an opaque + // link. This is an alternative to using `next_chunk_index`. + NextChunkInternalLink types.String `tfsdk:"next_chunk_internal_link"` + // The number of rows within the result chunk. + RowCount types.Int64 `tfsdk:"row_count"` + // The starting row offset within the result set. + RowOffset types.Int64 `tfsdk:"row_offset"` +} + +type Format string + +const FormatArrowStream Format = `ARROW_STREAM` + +const FormatCsv Format = `CSV` + +const FormatJsonArray Format = `JSON_ARRAY` + +// String representation for [fmt.Print] +func (f *Format) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Format) Set(v string) error { + switch v { + case `ARROW_STREAM`, `CSV`, `JSON_ARRAY`: + *f = Format(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARROW_STREAM", "CSV", "JSON_ARRAY"`, v) + } +} + +// Type always returns Format to satisfy [pflag.Value] interface +func (f *Format) Type() string { + return "Format" +} + +// Get an alert +type GetAlertRequest struct { + AlertId types.String `tfsdk:"-" url:"-"` +} + +// Retrieve a definition +type GetDashboardRequest struct { + DashboardId types.String `tfsdk:"-" url:"-"` +} + +// Get object ACL +type GetDbsqlPermissionRequest struct { + // Object ID. An ACL is returned for the object with this UUID. + ObjectId types.String `tfsdk:"-" url:"-"` + // The type of object permissions to check. + ObjectType ObjectTypePlural `tfsdk:"-" url:"-"` +} + +// Get a query definition. +type GetQueryRequest struct { + QueryId types.String `tfsdk:"-" url:"-"` +} + +type GetResponse struct { + AccessControlList []AccessControl `tfsdk:"access_control_list"` + // An object's type and UUID, separated by a forward slash (/) character. + ObjectId types.String `tfsdk:"object_id"` + // A singular noun object type. 
+ ObjectType ObjectType `tfsdk:"object_type"` +} + +// Get status, manifest, and result first chunk +type GetStatementRequest struct { + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. + StatementId types.String `tfsdk:"-" url:"-"` +} + +type GetStatementResponse struct { + // The result manifest provides schema and metadata for the result set. + Manifest *ResultManifest `tfsdk:"manifest"` + // Contains the result data of a single chunk when using `INLINE` + // disposition. When using `EXTERNAL_LINKS` disposition, the array + // `external_links` is used instead to provide presigned URLs to the result + // data in cloud storage. Exactly one of these alternatives is used. (While + // the `external_links` array prepares the API to return multiple links in a + // single response. Currently only a single link is returned.) + Result *ResultData `tfsdk:"result"` + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. + StatementId types.String `tfsdk:"statement_id"` + // The status response includes execution state and if relevant, error + // information. + Status *StatementStatus `tfsdk:"status"` +} + +// Get result chunk by index +type GetStatementResultChunkNRequest struct { + ChunkIndex types.Int64 `tfsdk:"-" url:"-"` + // The statement ID is returned upon successfully submitting a SQL + // statement, and is a required reference for all subsequent calls. + StatementId types.String `tfsdk:"-" url:"-"` +} + +// Get SQL warehouse permission levels +type GetWarehousePermissionLevelsRequest struct { + // The SQL warehouse for which to get or manage permissions. + WarehouseId types.String `tfsdk:"-" url:"-"` +} + +type GetWarehousePermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []WarehousePermissionsDescription `tfsdk:"permission_levels"` +} + +// Get SQL warehouse permissions +type GetWarehousePermissionsRequest struct { + // The SQL warehouse for which to get or manage permissions. + WarehouseId types.String `tfsdk:"-" url:"-"` +} + +// Get warehouse info +type GetWarehouseRequest struct { + // Required. Id of the SQL warehouse. + Id types.String `tfsdk:"-" url:"-"` +} + +type GetWarehouseResponse struct { + // The amount of time in minutes that a SQL warehouse must be idle (i.e., no + // RUNNING queries) before it is automatically stopped. + // + // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop. + // + // Defaults to 120 mins + AutoStopMins types.Int64 `tfsdk:"auto_stop_mins"` + // Channel Details + Channel *Channel `tfsdk:"channel"` + // Size of the clusters allocated for this warehouse. Increasing the size of + // a spark cluster allows you to run larger queries on it. If you want to + // increase the number of concurrent queries, please tune max_num_clusters. + // + // Supported values: - 2X-Small - X-Small - Small - Medium - Large - X-Large + // - 2X-Large - 3X-Large - 4X-Large + ClusterSize types.String `tfsdk:"cluster_size"` + // warehouse creator name + CreatorName types.String `tfsdk:"creator_name"` + // Configures whether the warehouse should use Photon optimized clusters. + // + // Defaults to false. + EnablePhoton types.Bool `tfsdk:"enable_photon"` + // Configures whether the warehouse should use serverless compute + EnableServerlessCompute types.Bool `tfsdk:"enable_serverless_compute"` + // Optional health status. 
Assume the warehouse is healthy if this field is + // not set. + Health *EndpointHealth `tfsdk:"health"` + // unique identifier for warehouse + Id types.String `tfsdk:"id"` + // Deprecated. Instance profile used to pass IAM role to the cluster + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // the jdbc connection string for this warehouse + JdbcUrl types.String `tfsdk:"jdbc_url"` + // Maximum number of clusters that the autoscaler will create to handle + // concurrent queries. + // + // Supported values: - Must be >= min_num_clusters - Must be <= 30. + // + // Defaults to min_clusters if unset. + MaxNumClusters types.Int64 `tfsdk:"max_num_clusters"` + // Minimum number of available clusters that will be maintained for this SQL + // warehouse. Increasing this will ensure that a larger number of clusters + // are always running and therefore may reduce the cold start time for new + // queries. This is similar to reserved vs. revocable cores in a resource + // manager. + // + // Supported values: - Must be > 0 - Must be <= min(max_num_clusters, 30) + // + // Defaults to 1 + MinNumClusters types.Int64 `tfsdk:"min_num_clusters"` + // Logical name for the cluster. + // + // Supported values: - Must be unique within an org. - Must be less than 100 + // characters. + Name types.String `tfsdk:"name"` + // current number of active sessions for the warehouse + NumActiveSessions types.Int64 `tfsdk:"num_active_sessions"` + // current number of clusters running for the service + NumClusters types.Int64 `tfsdk:"num_clusters"` + // ODBC parameters for the SQL warehouse + OdbcParams *OdbcParams `tfsdk:"odbc_params"` + // Configurations whether the warehouse should use spot instances. + SpotInstancePolicy SpotInstancePolicy `tfsdk:"spot_instance_policy"` + // State of the warehouse + State State `tfsdk:"state"` + // A set of key-value pairs that will be tagged on all resources (e.g., AWS + // instances and EBS volumes) associated with this SQL warehouse. + // + // Supported values: - Number of tags < 45. + Tags *EndpointTags `tfsdk:"tags"` + // Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless + // compute, you must set to `PRO` and also set the field + // `enable_serverless_compute` to `true`. + WarehouseType GetWarehouseResponseWarehouseType `tfsdk:"warehouse_type"` +} + +// Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, +// you must set to `PRO` and also set the field `enable_serverless_compute` to +// `true`. 
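// Editorial sketch (not part of the generated file): every primitive in the
// GetWarehouseResponse struct above is wrapped in a terraform-plugin-framework
// value type, so callers unwrap with ValueString/ValueInt64/ValueBool instead
// of reading Go natives directly. The provider import path is an assumption.
package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/service/sql_tf" // assumed module path
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func describeWarehouse(w sql_tf.GetWarehouseResponse) string {
	return fmt.Sprintf("%s: size=%s, max_clusters=%d, serverless=%t",
		w.Name.ValueString(),
		w.ClusterSize.ValueString(),
		w.MaxNumClusters.ValueInt64(),
		w.EnableServerlessCompute.ValueBool())
}

func main() {
	w := sql_tf.GetWarehouseResponse{
		Name:                    types.StringValue("analytics"), // hypothetical values
		ClusterSize:             types.StringValue("Small"),
		MaxNumClusters:          types.Int64Value(3),
		EnableServerlessCompute: types.BoolValue(true),
	}
	fmt.Println(describeWarehouse(w))
}

// The GetWarehouseResponseWarehouseType enum documented just above is declared next.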
+type GetWarehouseResponseWarehouseType string + +const GetWarehouseResponseWarehouseTypeClassic GetWarehouseResponseWarehouseType = `CLASSIC` + +const GetWarehouseResponseWarehouseTypePro GetWarehouseResponseWarehouseType = `PRO` + +const GetWarehouseResponseWarehouseTypeTypeUnspecified GetWarehouseResponseWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *GetWarehouseResponseWarehouseType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetWarehouseResponseWarehouseType) Set(v string) error { + switch v { + case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: + *f = GetWarehouseResponseWarehouseType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns GetWarehouseResponseWarehouseType to satisfy [pflag.Value] interface +func (f *GetWarehouseResponseWarehouseType) Type() string { + return "GetWarehouseResponseWarehouseType" +} + +type GetWorkspaceWarehouseConfigResponse struct { + // Optional: Channel selection details + Channel *Channel `tfsdk:"channel"` + // Deprecated: Use sql_configuration_parameters + ConfigParam *RepeatedEndpointConfPairs `tfsdk:"config_param"` + // Spark confs for external hive metastore configuration JSON serialized + // size must be less than <= 512K + DataAccessConfig []EndpointConfPair `tfsdk:"data_access_config"` + // List of Warehouse Types allowed in this workspace (limits allowed value + // of the type field in CreateWarehouse and EditWarehouse). Note: Some types + // cannot be disabled, they don't need to be specified in + // SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing + // warehouses to be converted to another type. Used by frontend to save + // specific type availability in the warehouse create and edit form UI. 
+ EnabledWarehouseTypes []WarehouseTypePair `tfsdk:"enabled_warehouse_types"` + // Deprecated: Use sql_configuration_parameters + GlobalParam *RepeatedEndpointConfPairs `tfsdk:"global_param"` + // GCP only: Google Service Account used to pass to cluster to access Google + // Cloud Storage + GoogleServiceAccount types.String `tfsdk:"google_service_account"` + // AWS Only: Instance profile used to pass IAM role to the cluster + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // Security policy for warehouses + SecurityPolicy GetWorkspaceWarehouseConfigResponseSecurityPolicy `tfsdk:"security_policy"` + // SQL configuration parameters + SqlConfigurationParameters *RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters"` +} + +// Security policy for warehouses +type GetWorkspaceWarehouseConfigResponseSecurityPolicy string + +const GetWorkspaceWarehouseConfigResponseSecurityPolicyDataAccessControl GetWorkspaceWarehouseConfigResponseSecurityPolicy = `DATA_ACCESS_CONTROL` + +const GetWorkspaceWarehouseConfigResponseSecurityPolicyNone GetWorkspaceWarehouseConfigResponseSecurityPolicy = `NONE` + +const GetWorkspaceWarehouseConfigResponseSecurityPolicyPassthrough GetWorkspaceWarehouseConfigResponseSecurityPolicy = `PASSTHROUGH` + +// String representation for [fmt.Print] +func (f *GetWorkspaceWarehouseConfigResponseSecurityPolicy) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *GetWorkspaceWarehouseConfigResponseSecurityPolicy) Set(v string) error { + switch v { + case `DATA_ACCESS_CONTROL`, `NONE`, `PASSTHROUGH`: + *f = GetWorkspaceWarehouseConfigResponseSecurityPolicy(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATA_ACCESS_CONTROL", "NONE", "PASSTHROUGH"`, v) + } +} + +// Type always returns GetWorkspaceWarehouseConfigResponseSecurityPolicy to satisfy [pflag.Value] interface +func (f *GetWorkspaceWarehouseConfigResponseSecurityPolicy) Type() string { + return "GetWorkspaceWarehouseConfigResponseSecurityPolicy" +} + +// Get dashboard objects +type ListDashboardsRequest struct { + // Name of dashboard attribute to order by. + Order ListOrder `tfsdk:"-" url:"order,omitempty"` + // Page number to retrieve. + Page types.Int64 `tfsdk:"-" url:"page,omitempty"` + // Number of dashboards to return per page. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // Full text search term. + Q types.String `tfsdk:"-" url:"q,omitempty"` +} + +type ListOrder string + +const ListOrderCreatedAt ListOrder = `created_at` + +const ListOrderName ListOrder = `name` + +// String representation for [fmt.Print] +func (f *ListOrder) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ListOrder) Set(v string) error { + switch v { + case `created_at`, `name`: + *f = ListOrder(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "created_at", "name"`, v) + } +} + +// Type always returns ListOrder to satisfy [pflag.Value] interface +func (f *ListOrder) Type() string { + return "ListOrder" +} + +// Get a list of queries +type ListQueriesRequest struct { + // Name of query attribute to order by. Default sort order is ascending. + // Append a dash (`-`) to order descending instead. + // + // - `name`: The name of the query. + // + // - `created_at`: The timestamp the query was created. + // + // - `runtime`: The time it took to run this query. This is blank for + // parameterized queries. 
A blank value is treated as the highest value for + // sorting. + // + // - `executed_at`: The timestamp when the query was last run. + // + // - `created_by`: The user name of the user that created the query. + Order types.String `tfsdk:"-" url:"order,omitempty"` + // Page number to retrieve. + Page types.Int64 `tfsdk:"-" url:"page,omitempty"` + // Number of queries to return per page. + PageSize types.Int64 `tfsdk:"-" url:"page_size,omitempty"` + // Full text search term + Q types.String `tfsdk:"-" url:"q,omitempty"` +} + +type ListQueriesResponse struct { + // Whether there is another page of results. + HasNextPage types.Bool `tfsdk:"has_next_page"` + // A token that can be used to get the next page of results. + NextPageToken types.String `tfsdk:"next_page_token"` + + Res []QueryInfo `tfsdk:"res"` +} + +// List Queries +type ListQueryHistoryRequest struct { + // A filter to limit query history results. This field is optional. + FilterBy *QueryFilter `tfsdk:"-" url:"filter_by,omitempty"` + // Whether to include metrics about query. + IncludeMetrics types.Bool `tfsdk:"-" url:"include_metrics,omitempty"` + // Limit the number of results returned in one page. The default is 100. + MaxResults types.Int64 `tfsdk:"-" url:"max_results,omitempty"` + // A token that can be used to get the next page of results. The token can + // contains characters that need to be encoded before using it in a URL. For + // example, the character '+' needs to be replaced by %2B. + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListResponse struct { + // The total number of dashboards. + Count types.Int64 `tfsdk:"count"` + // The current page being displayed. + Page types.Int64 `tfsdk:"page"` + // The number of dashboards per page. + PageSize types.Int64 `tfsdk:"page_size"` + // List of dashboards returned. + Results []Dashboard `tfsdk:"results"` +} + +// List warehouses +type ListWarehousesRequest struct { + // Service Principal which will be used to fetch the list of warehouses. If + // not specified, the user from the session header is used. + RunAsUserId types.Int64 `tfsdk:"-" url:"run_as_user_id,omitempty"` +} + +type ListWarehousesResponse struct { + // A list of warehouses and their configurations. + Warehouses []EndpointInfo `tfsdk:"warehouses"` +} + +// If specified, allows multiple values to be selected for this parameter. Only +// applies to dropdown list and query-based dropdown list parameters. +type MultiValuesOptions struct { + // Character that prefixes each selected parameter value. + Prefix types.String `tfsdk:"prefix"` + // Character that separates each selected parameter value. Defaults to a + // comma. + Separator types.String `tfsdk:"separator"` + // Character that suffixes each selected parameter value. + Suffix types.String `tfsdk:"suffix"` +} + +// A singular noun object type. 
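// Editorial sketch (not part of the generated file): request-only fields in the
// list requests above carry `tfsdk:"-"` plus a `url:"...,omitempty"` tag, so
// they are sent as query-string parameters and never persisted in Terraform
// state. A minimal ListQueriesRequest, assuming the provider import path:
package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/service/sql_tf" // assumed module path
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	req := sql_tf.ListQueriesRequest{
		Order:    types.StringValue("-executed_at"), // leading dash orders descending (see Order doc above)
		Page:     types.Int64Value(1),
		PageSize: types.Int64Value(25),
		Q:        types.StringValue("taxi"), // hypothetical search term
	}
	fmt.Printf("order=%s page=%d page_size=%d q=%s\n",
		req.Order.ValueString(), req.Page.ValueInt64(),
		req.PageSize.ValueInt64(), req.Q.ValueString())
}

// The ObjectType enum described just above is declared next.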
+type ObjectType string + +const ObjectTypeAlert ObjectType = `alert` + +const ObjectTypeDashboard ObjectType = `dashboard` + +const ObjectTypeDataSource ObjectType = `data_source` + +const ObjectTypeQuery ObjectType = `query` + +// String representation for [fmt.Print] +func (f *ObjectType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ObjectType) Set(v string) error { + switch v { + case `alert`, `dashboard`, `data_source`, `query`: + *f = ObjectType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "alert", "dashboard", "data_source", "query"`, v) + } +} + +// Type always returns ObjectType to satisfy [pflag.Value] interface +func (f *ObjectType) Type() string { + return "ObjectType" +} + +// Always a plural of the object type. +type ObjectTypePlural string + +const ObjectTypePluralAlerts ObjectTypePlural = `alerts` + +const ObjectTypePluralDashboards ObjectTypePlural = `dashboards` + +const ObjectTypePluralDataSources ObjectTypePlural = `data_sources` + +const ObjectTypePluralQueries ObjectTypePlural = `queries` + +// String representation for [fmt.Print] +func (f *ObjectTypePlural) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ObjectTypePlural) Set(v string) error { + switch v { + case `alerts`, `dashboards`, `data_sources`, `queries`: + *f = ObjectTypePlural(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "alerts", "dashboards", "data_sources", "queries"`, v) + } +} + +// Type always returns ObjectTypePlural to satisfy [pflag.Value] interface +func (f *ObjectTypePlural) Type() string { + return "ObjectTypePlural" +} + +type OdbcParams struct { + Hostname types.String `tfsdk:"hostname"` + + Path types.String `tfsdk:"path"` + + Port types.Int64 `tfsdk:"port"` + + Protocol types.String `tfsdk:"protocol"` +} + +// The singular form of the type of object which can be owned. +type OwnableObjectType string + +const OwnableObjectTypeAlert OwnableObjectType = `alert` + +const OwnableObjectTypeDashboard OwnableObjectType = `dashboard` + +const OwnableObjectTypeQuery OwnableObjectType = `query` + +// String representation for [fmt.Print] +func (f *OwnableObjectType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *OwnableObjectType) Set(v string) error { + switch v { + case `alert`, `dashboard`, `query`: + *f = OwnableObjectType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "alert", "dashboard", "query"`, v) + } +} + +// Type always returns OwnableObjectType to satisfy [pflag.Value] interface +func (f *OwnableObjectType) Type() string { + return "OwnableObjectType" +} + +type Parameter struct { + // List of valid parameter values, newline delimited. Only applies for + // dropdown list parameters. + EnumOptions types.String `tfsdk:"enumOptions"` + // If specified, allows multiple values to be selected for this parameter. + // Only applies to dropdown list and query-based dropdown list parameters. + MultiValuesOptions *MultiValuesOptions `tfsdk:"multiValuesOptions"` + // The literal parameter marker that appears between double curly braces in + // the query text. + Name types.String `tfsdk:"name"` + // The UUID of the query that provides the parameter values. Only applies + // for query-based dropdown list parameters. + QueryId types.String `tfsdk:"queryId"` + // The text displayed in a parameter picking widget. 
+ Title types.String `tfsdk:"title"` + // Parameters can have several different types. + Type ParameterType `tfsdk:"type"` + // The default value for this parameter. + Value any `tfsdk:"value"` +} + +// Parameters can have several different types. +type ParameterType string + +const ParameterTypeDatetime ParameterType = `datetime` + +const ParameterTypeEnum ParameterType = `enum` + +const ParameterTypeNumber ParameterType = `number` + +const ParameterTypeQuery ParameterType = `query` + +const ParameterTypeText ParameterType = `text` + +// String representation for [fmt.Print] +func (f *ParameterType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ParameterType) Set(v string) error { + switch v { + case `datetime`, `enum`, `number`, `query`, `text`: + *f = ParameterType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "datetime", "enum", "number", "query", "text"`, v) + } +} + +// Type always returns ParameterType to satisfy [pflag.Value] interface +func (f *ParameterType) Type() string { + return "ParameterType" +} + +// * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * `CAN_EDIT`: +// Can edit the query * `CAN_MANAGE`: Can manage the query +type PermissionLevel string + +// Can edit the query +const PermissionLevelCanEdit PermissionLevel = `CAN_EDIT` + +// Can manage the query +const PermissionLevelCanManage PermissionLevel = `CAN_MANAGE` + +// Can run the query +const PermissionLevelCanRun PermissionLevel = `CAN_RUN` + +// Can view the query +const PermissionLevelCanView PermissionLevel = `CAN_VIEW` + +// String representation for [fmt.Print] +func (f *PermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_RUN`, `CAN_VIEW`: + *f = PermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_RUN", "CAN_VIEW"`, v) + } +} + +// Type always returns PermissionLevel to satisfy [pflag.Value] interface +func (f *PermissionLevel) Type() string { + return "PermissionLevel" +} + +// Whether plans exist for the execution, or the reason why they are missing +type PlansState string + +const PlansStateEmpty PlansState = `EMPTY` + +const PlansStateExists PlansState = `EXISTS` + +const PlansStateIgnoredLargePlansSize PlansState = `IGNORED_LARGE_PLANS_SIZE` + +const PlansStateIgnoredSmallDuration PlansState = `IGNORED_SMALL_DURATION` + +const PlansStateIgnoredSparkPlanType PlansState = `IGNORED_SPARK_PLAN_TYPE` + +const PlansStateUnknown PlansState = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *PlansState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PlansState) Set(v string) error { + switch v { + case `EMPTY`, `EXISTS`, `IGNORED_LARGE_PLANS_SIZE`, `IGNORED_SMALL_DURATION`, `IGNORED_SPARK_PLAN_TYPE`, `UNKNOWN`: + *f = PlansState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EMPTY", "EXISTS", "IGNORED_LARGE_PLANS_SIZE", "IGNORED_SMALL_DURATION", "IGNORED_SPARK_PLAN_TYPE", "UNKNOWN"`, v) + } +} + +// Type always returns PlansState to satisfy [pflag.Value] interface +func (f *PlansState) Type() string { + return "PlansState" +} + +type Query struct { + // Describes whether the authenticated user is allowed to edit the + // definition of this query. 
+ CanEdit types.Bool `tfsdk:"can_edit"` + // The timestamp when this query was created. + CreatedAt types.String `tfsdk:"created_at"` + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. [Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + DataSourceId types.String `tfsdk:"data_source_id"` + // General description that conveys additional information about this query + // such as usage notes. + Description types.String `tfsdk:"description"` + // Query ID. + Id types.String `tfsdk:"id"` + // Indicates whether the query is trashed. Trashed queries can't be used in + // dashboards, or appear in search results. If this boolean is `true`, the + // `options` property for this query includes a `moved_to_trash_at` + // timestamp. Trashed queries are permanently deleted after 30 days. + IsArchived types.Bool `tfsdk:"is_archived"` + // Whether the query is a draft. Draft queries only appear in list views for + // their owners. Visualizations from draft queries cannot appear on + // dashboards. + IsDraft types.Bool `tfsdk:"is_draft"` + // Whether this query object appears in the current user's favorites list. + // This flag determines whether the star icon for favorites is selected. + IsFavorite types.Bool `tfsdk:"is_favorite"` + // Text parameter types are not safe from SQL injection for all types of + // data source. Set this Boolean parameter to `true` if a query either does + // not use any text type parameters or uses a data source type where text + // type parameters are handled safely. + IsSafe types.Bool `tfsdk:"is_safe"` + + LastModifiedBy *User `tfsdk:"last_modified_by"` + // The ID of the user who last saved changes to this query. + LastModifiedById types.Int64 `tfsdk:"last_modified_by_id"` + // If there is a cached result for this query and user, this field includes + // the query result ID. If this query uses parameters, this field is always + // null. + LatestQueryDataId types.String `tfsdk:"latest_query_data_id"` + // The title of this query that appears in list views, widget headings, and + // on the query page. + Name types.String `tfsdk:"name"` + + Options *QueryOptions `tfsdk:"options"` + // The identifier of the workspace folder containing the object. + Parent types.String `tfsdk:"parent"` + // * `CAN_VIEW`: Can view the query * `CAN_RUN`: Can run the query * + // `CAN_EDIT`: Can edit the query * `CAN_MANAGE`: Can manage the query + PermissionTier PermissionLevel `tfsdk:"permission_tier"` + // The text of the query to be run. + Query types.String `tfsdk:"query"` + // A SHA-256 hash of the query text along with the authenticated user ID. + QueryHash types.String `tfsdk:"query_hash"` + // Sets the **Run as** role for the object. Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `tfsdk:"run_as_role"` + + Tags []types.String `tfsdk:"tags"` + // The timestamp at which this query was last updated. + UpdatedAt types.String `tfsdk:"updated_at"` + + User *User `tfsdk:"user"` + // The ID of the user who owns the query. + UserId types.Int64 `tfsdk:"user_id"` + + Visualizations []Visualization `tfsdk:"visualizations"` +} + +type QueryEditContent struct { + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. 
[Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + DataSourceId types.String `tfsdk:"data_source_id"` + // General description that conveys additional information about this query + // such as usage notes. + Description types.String `tfsdk:"description"` + // The title of this query that appears in list views, widget headings, and + // on the query page. + Name types.String `tfsdk:"name"` + // Exclusively used for storing a list parameter definitions. A parameter is + // an object with `title`, `name`, `type`, and `value` properties. The + // `value` field here is the default value. It can be overridden at runtime. + Options any `tfsdk:"options"` + // The text of the query to be run. + Query types.String `tfsdk:"query"` + + QueryId types.String `tfsdk:"-" url:"-"` + // Sets the **Run as** role for the object. Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `tfsdk:"run_as_role"` + + Tags []types.String `tfsdk:"tags"` +} + +// A filter to limit query history results. This field is optional. +type QueryFilter struct { + QueryStartTimeRange *TimeRange `tfsdk:"query_start_time_range" url:"query_start_time_range,omitempty"` + // A list of statement IDs. + StatementIds []types.String `tfsdk:"statement_ids" url:"statement_ids,omitempty"` + + Statuses []QueryStatus `tfsdk:"statuses" url:"statuses,omitempty"` + // A list of user IDs who ran the queries. + UserIds []types.Int64 `tfsdk:"user_ids" url:"user_ids,omitempty"` + // A list of warehouse IDs. + WarehouseIds []types.String `tfsdk:"warehouse_ids" url:"warehouse_ids,omitempty"` +} + +type QueryInfo struct { + // Reserved for internal use. + CanSubscribeToLiveQuery types.Bool `tfsdk:"canSubscribeToLiveQuery"` + // Channel information for the SQL warehouse at the time of query execution + ChannelUsed *ChannelInfo `tfsdk:"channel_used"` + // Total execution time of the statement ( excluding result fetch time ). + Duration types.Int64 `tfsdk:"duration"` + // Alias for `warehouse_id`. + EndpointId types.String `tfsdk:"endpoint_id"` + // Message describing why the query could not complete. + ErrorMessage types.String `tfsdk:"error_message"` + // The ID of the user whose credentials were used to run the query. + ExecutedAsUserId types.Int64 `tfsdk:"executed_as_user_id"` + // The email address or username of the user whose credentials were used to + // run the query. + ExecutedAsUserName types.String `tfsdk:"executed_as_user_name"` + // The time execution of the query ended. + ExecutionEndTimeMs types.Int64 `tfsdk:"execution_end_time_ms"` + // Whether more updates for the query are expected. + IsFinal types.Bool `tfsdk:"is_final"` + // A key that can be used to look up query details. + LookupKey types.String `tfsdk:"lookup_key"` + // Metrics about query execution. + Metrics *QueryMetrics `tfsdk:"metrics"` + // Whether plans exist for the execution, or the reason why they are missing + PlansState PlansState `tfsdk:"plans_state"` + // The time the query ended. + QueryEndTimeMs types.Int64 `tfsdk:"query_end_time_ms"` + // The query ID. + QueryId types.String `tfsdk:"query_id"` + // The time the query started. + QueryStartTimeMs types.Int64 `tfsdk:"query_start_time_ms"` + // The text of the query. + QueryText types.String `tfsdk:"query_text"` + // The number of results returned by the query. + RowsProduced types.Int64 `tfsdk:"rows_produced"` + // URL to the query plan. 
+ SparkUiUrl types.String `tfsdk:"spark_ui_url"` + // Type of statement for this query + StatementType QueryStatementType `tfsdk:"statement_type"` + // Query status with one the following values: * `QUEUED`: Query has been + // received and queued. * `RUNNING`: Query has started. * `CANCELED`: Query + // has been cancelled by the user. * `FAILED`: Query has failed. * + // `FINISHED`: Query has completed. + Status QueryStatus `tfsdk:"status"` + // The ID of the user who ran the query. + UserId types.Int64 `tfsdk:"user_id"` + // The email address or username of the user who ran the query. + UserName types.String `tfsdk:"user_name"` + // Warehouse ID. + WarehouseId types.String `tfsdk:"warehouse_id"` +} + +type QueryList struct { + // The total number of queries. + Count types.Int64 `tfsdk:"count"` + // The page number that is currently displayed. + Page types.Int64 `tfsdk:"page"` + // The number of queries per page. + PageSize types.Int64 `tfsdk:"page_size"` + // List of queries returned. + Results []Query `tfsdk:"results"` +} + +// Metrics about query execution. +type QueryMetrics struct { + // Time spent loading metadata and optimizing the query, in milliseconds. + CompilationTimeMs types.Int64 `tfsdk:"compilation_time_ms"` + // Time spent executing the query, in milliseconds. + ExecutionTimeMs types.Int64 `tfsdk:"execution_time_ms"` + // Reserved for internal use. + MetadataTimeMs types.Int64 `tfsdk:"metadata_time_ms"` + // Total amount of data sent over the network between executor nodes during + // shuffle, in bytes. + NetworkSentBytes types.Int64 `tfsdk:"network_sent_bytes"` + // Timestamp of when the query was enqueued waiting while the warehouse was + // at max load. This field is optional and will not appear if the query + // skipped the overloading queue. + OverloadingQueueStartTimestamp types.Int64 `tfsdk:"overloading_queue_start_timestamp"` + // Total execution time for all individual Photon query engine tasks in the + // query, in milliseconds. + PhotonTotalTimeMs types.Int64 `tfsdk:"photon_total_time_ms"` + // Reserved for internal use. + PlanningTimeMs types.Int64 `tfsdk:"planning_time_ms"` + // Timestamp of when the query was enqueued waiting for a cluster to be + // provisioned for the warehouse. This field is optional and will not appear + // if the query skipped the provisioning queue. + ProvisioningQueueStartTimestamp types.Int64 `tfsdk:"provisioning_queue_start_timestamp"` + // Total number of bytes in all tables not read due to pruning + PrunedBytes types.Int64 `tfsdk:"pruned_bytes"` + // Total number of files from all tables not read due to pruning + PrunedFilesCount types.Int64 `tfsdk:"pruned_files_count"` + // Timestamp of when the underlying compute started compilation of the + // query. + QueryCompilationStartTimestamp types.Int64 `tfsdk:"query_compilation_start_timestamp"` + // Reserved for internal use. + QueryExecutionTimeMs types.Int64 `tfsdk:"query_execution_time_ms"` + // Total size of data read by the query, in bytes. + ReadBytes types.Int64 `tfsdk:"read_bytes"` + // Size of persistent data read from the cache, in bytes. + ReadCacheBytes types.Int64 `tfsdk:"read_cache_bytes"` + // Number of files read after pruning. + ReadFilesCount types.Int64 `tfsdk:"read_files_count"` + // Number of partitions read after pruning. + ReadPartitionsCount types.Int64 `tfsdk:"read_partitions_count"` + // Size of persistent data read from cloud object storage on your cloud + // tenant, in bytes. 
+ ReadRemoteBytes types.Int64 `tfsdk:"read_remote_bytes"` + // Time spent fetching the query results after the execution finished, in + // milliseconds. + ResultFetchTimeMs types.Int64 `tfsdk:"result_fetch_time_ms"` + // true if the query result was fetched from cache, false otherwise. + ResultFromCache types.Bool `tfsdk:"result_from_cache"` + // Total number of rows returned by the query. + RowsProducedCount types.Int64 `tfsdk:"rows_produced_count"` + // Total number of rows read by the query. + RowsReadCount types.Int64 `tfsdk:"rows_read_count"` + // Size of data temporarily written to disk while executing the query, in + // bytes. + SpillToDiskBytes types.Int64 `tfsdk:"spill_to_disk_bytes"` + // Sum of execution time for all of the query’s tasks, in milliseconds. + TaskTotalTimeMs types.Int64 `tfsdk:"task_total_time_ms"` + // Total execution time of the query from the client’s point of view, in + // milliseconds. + TotalTimeMs types.Int64 `tfsdk:"total_time_ms"` + // Size pf persistent data written to cloud object storage in your cloud + // tenant, in bytes. + WriteRemoteBytes types.Int64 `tfsdk:"write_remote_bytes"` +} + +type QueryOptions struct { + // The name of the catalog to execute this query in. + Catalog types.String `tfsdk:"catalog"` + // The timestamp when this query was moved to trash. Only present when the + // `is_archived` property is `true`. Trashed items are deleted after thirty + // days. + MovedToTrashAt types.String `tfsdk:"moved_to_trash_at"` + + Parameters []Parameter `tfsdk:"parameters"` + // The name of the schema to execute this query in. + Schema types.String `tfsdk:"schema"` +} + +type QueryPostContent struct { + // Data source ID maps to the ID of the data source used by the resource and + // is distinct from the warehouse ID. [Learn more] + // + // [Learn more]: https://docs.databricks.com/api/workspace/datasources/list + DataSourceId types.String `tfsdk:"data_source_id"` + // General description that conveys additional information about this query + // such as usage notes. + Description types.String `tfsdk:"description"` + // The title of this query that appears in list views, widget headings, and + // on the query page. + Name types.String `tfsdk:"name"` + // Exclusively used for storing a list parameter definitions. A parameter is + // an object with `title`, `name`, `type`, and `value` properties. The + // `value` field here is the default value. It can be overridden at runtime. + Options any `tfsdk:"options"` + // The identifier of the workspace folder containing the object. + Parent types.String `tfsdk:"parent"` + // The text of the query to be run. + Query types.String `tfsdk:"query"` + // Sets the **Run as** role for the object. 
Must be set to one of `"viewer"` + // (signifying "run as viewer" behavior) or `"owner"` (signifying "run as + // owner" behavior) + RunAsRole RunAsRole `tfsdk:"run_as_role"` + + Tags []types.String `tfsdk:"tags"` +} + +// Type of statement for this query +type QueryStatementType string + +const QueryStatementTypeAlter QueryStatementType = `ALTER` + +const QueryStatementTypeAnalyze QueryStatementType = `ANALYZE` + +const QueryStatementTypeCopy QueryStatementType = `COPY` + +const QueryStatementTypeCreate QueryStatementType = `CREATE` + +const QueryStatementTypeDelete QueryStatementType = `DELETE` + +const QueryStatementTypeDescribe QueryStatementType = `DESCRIBE` + +const QueryStatementTypeDrop QueryStatementType = `DROP` + +const QueryStatementTypeExplain QueryStatementType = `EXPLAIN` + +const QueryStatementTypeGrant QueryStatementType = `GRANT` + +const QueryStatementTypeInsert QueryStatementType = `INSERT` + +const QueryStatementTypeMerge QueryStatementType = `MERGE` + +const QueryStatementTypeOptimize QueryStatementType = `OPTIMIZE` + +const QueryStatementTypeOther QueryStatementType = `OTHER` + +const QueryStatementTypeRefresh QueryStatementType = `REFRESH` + +const QueryStatementTypeReplace QueryStatementType = `REPLACE` + +const QueryStatementTypeRevoke QueryStatementType = `REVOKE` + +const QueryStatementTypeSelect QueryStatementType = `SELECT` + +const QueryStatementTypeSet QueryStatementType = `SET` + +const QueryStatementTypeShow QueryStatementType = `SHOW` + +const QueryStatementTypeTruncate QueryStatementType = `TRUNCATE` + +const QueryStatementTypeUpdate QueryStatementType = `UPDATE` + +const QueryStatementTypeUse QueryStatementType = `USE` + +// String representation for [fmt.Print] +func (f *QueryStatementType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *QueryStatementType) Set(v string) error { + switch v { + case `ALTER`, `ANALYZE`, `COPY`, `CREATE`, `DELETE`, `DESCRIBE`, `DROP`, `EXPLAIN`, `GRANT`, `INSERT`, `MERGE`, `OPTIMIZE`, `OTHER`, `REFRESH`, `REPLACE`, `REVOKE`, `SELECT`, `SET`, `SHOW`, `TRUNCATE`, `UPDATE`, `USE`: + *f = QueryStatementType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ALTER", "ANALYZE", "COPY", "CREATE", "DELETE", "DESCRIBE", "DROP", "EXPLAIN", "GRANT", "INSERT", "MERGE", "OPTIMIZE", "OTHER", "REFRESH", "REPLACE", "REVOKE", "SELECT", "SET", "SHOW", "TRUNCATE", "UPDATE", "USE"`, v) + } +} + +// Type always returns QueryStatementType to satisfy [pflag.Value] interface +func (f *QueryStatementType) Type() string { + return "QueryStatementType" +} + +// Query status with one the following values: * `QUEUED`: Query has been +// received and queued. * `RUNNING`: Query has started. * `CANCELED`: Query has +// been cancelled by the user. * `FAILED`: Query has failed. * `FINISHED`: Query +// has completed. +type QueryStatus string + +// Query has been cancelled by the user. +const QueryStatusCanceled QueryStatus = `CANCELED` + +// Query has failed. +const QueryStatusFailed QueryStatus = `FAILED` + +// Query has completed. +const QueryStatusFinished QueryStatus = `FINISHED` + +// Query has been received and queued. +const QueryStatusQueued QueryStatus = `QUEUED` + +// Query has started. 
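// Editorial sketch (not part of the generated file): the QueryStatus constants
// above combine with QueryFilter and ListQueryHistoryRequest, defined earlier
// in this file, to page through query history filtered by status. The provider
// import path is an assumption.
package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/service/sql_tf" // assumed module path
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	req := sql_tf.ListQueryHistoryRequest{
		FilterBy: &sql_tf.QueryFilter{
			Statuses:     []sql_tf.QueryStatus{sql_tf.QueryStatusFinished, sql_tf.QueryStatusFailed},
			WarehouseIds: []types.String{types.StringValue("abc123")}, // hypothetical warehouse ID
		},
		IncludeMetrics: types.BoolValue(true),
		MaxResults:     types.Int64Value(100),
	}
	fmt.Printf("filtering %d status(es) on %d warehouse(s)\n",
		len(req.FilterBy.Statuses), len(req.FilterBy.WarehouseIds))
}

// The remaining QueryStatus constant (RUNNING), documented just above, and its
// pflag-style helpers are generated next.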
+const QueryStatusRunning QueryStatus = `RUNNING` + +// String representation for [fmt.Print] +func (f *QueryStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *QueryStatus) Set(v string) error { + switch v { + case `CANCELED`, `FAILED`, `FINISHED`, `QUEUED`, `RUNNING`: + *f = QueryStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELED", "FAILED", "FINISHED", "QUEUED", "RUNNING"`, v) + } +} + +// Type always returns QueryStatus to satisfy [pflag.Value] interface +func (f *QueryStatus) Type() string { + return "QueryStatus" +} + +type RepeatedEndpointConfPairs struct { + // Deprecated: Use configuration_pairs + ConfigPair []EndpointConfPair `tfsdk:"config_pair"` + + ConfigurationPairs []EndpointConfPair `tfsdk:"configuration_pairs"` +} + +// Restore a dashboard +type RestoreDashboardRequest struct { + DashboardId types.String `tfsdk:"-" url:"-"` +} + +// Restore a query +type RestoreQueryRequest struct { + QueryId types.String `tfsdk:"-" url:"-"` +} + +type RestoreResponse struct { +} + +// Contains the result data of a single chunk when using `INLINE` disposition. +// When using `EXTERNAL_LINKS` disposition, the array `external_links` is used +// instead to provide presigned URLs to the result data in cloud storage. +// Exactly one of these alternatives is used. (While the `external_links` array +// prepares the API to return multiple links in a single response. Currently +// only a single link is returned.) +type ResultData struct { + // The number of bytes in the result chunk. This field is not available when + // using `INLINE` disposition. + ByteCount types.Int64 `tfsdk:"byte_count"` + // The position within the sequence of result set chunks. + ChunkIndex types.Int64 `tfsdk:"chunk_index"` + // The `JSON_ARRAY` format is an array of arrays of values, where each + // non-null value is formatted as a string. Null values are encoded as JSON + // `null`. + DataArray [][]types.String `tfsdk:"data_array"` + + ExternalLinks []ExternalLink `tfsdk:"external_links"` + // When fetching, provides the `chunk_index` for the _next_ chunk. If + // absent, indicates there are no more chunks. The next chunk can be fetched + // with a :method:statementexecution/getStatementResultChunkN request. + NextChunkIndex types.Int64 `tfsdk:"next_chunk_index"` + // When fetching, provides a link to fetch the _next_ chunk. If absent, + // indicates there are no more chunks. This link is an absolute `path` to be + // joined with your `$DATABRICKS_HOST`, and should be treated as an opaque + // link. This is an alternative to using `next_chunk_index`. + NextChunkInternalLink types.String `tfsdk:"next_chunk_internal_link"` + // The number of rows within the result chunk. + RowCount types.Int64 `tfsdk:"row_count"` + // The starting row offset within the result set. + RowOffset types.Int64 `tfsdk:"row_offset"` +} + +// The result manifest provides schema and metadata for the result set. +type ResultManifest struct { + // Array of result set chunk metadata. + Chunks []BaseChunkInfo `tfsdk:"chunks"` + + Format Format `tfsdk:"format"` + // The schema is an ordered list of column descriptions. + Schema *ResultSchema `tfsdk:"schema"` + // The total number of bytes in the result set. This field is not available + // when using `INLINE` disposition. + TotalByteCount types.Int64 `tfsdk:"total_byte_count"` + // The total number of chunks that the result set has been divided into. 
+ TotalChunkCount types.Int64 `tfsdk:"total_chunk_count"` + // The total number of rows in the result set. + TotalRowCount types.Int64 `tfsdk:"total_row_count"` + // Indicates whether the result is truncated due to `row_limit` or + // `byte_limit`. + Truncated types.Bool `tfsdk:"truncated"` +} + +// The schema is an ordered list of column descriptions. +type ResultSchema struct { + ColumnCount types.Int64 `tfsdk:"column_count"` + + Columns []ColumnInfo `tfsdk:"columns"` +} + +// Sets the **Run as** role for the object. Must be set to one of `"viewer"` +// (signifying "run as viewer" behavior) or `"owner"` (signifying "run as owner" +// behavior) +type RunAsRole string + +const RunAsRoleOwner RunAsRole = `owner` + +const RunAsRoleViewer RunAsRole = `viewer` + +// String representation for [fmt.Print] +func (f *RunAsRole) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RunAsRole) Set(v string) error { + switch v { + case `owner`, `viewer`: + *f = RunAsRole(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "owner", "viewer"`, v) + } +} + +// Type always returns RunAsRole to satisfy [pflag.Value] interface +func (f *RunAsRole) Type() string { + return "RunAsRole" +} + +type ServiceError struct { + ErrorCode ServiceErrorCode `tfsdk:"error_code"` + // A brief summary of the error condition. + Message types.String `tfsdk:"message"` +} + +type ServiceErrorCode string + +const ServiceErrorCodeAborted ServiceErrorCode = `ABORTED` + +const ServiceErrorCodeAlreadyExists ServiceErrorCode = `ALREADY_EXISTS` + +const ServiceErrorCodeBadRequest ServiceErrorCode = `BAD_REQUEST` + +const ServiceErrorCodeCancelled ServiceErrorCode = `CANCELLED` + +const ServiceErrorCodeDeadlineExceeded ServiceErrorCode = `DEADLINE_EXCEEDED` + +const ServiceErrorCodeInternalError ServiceErrorCode = `INTERNAL_ERROR` + +const ServiceErrorCodeIoError ServiceErrorCode = `IO_ERROR` + +const ServiceErrorCodeNotFound ServiceErrorCode = `NOT_FOUND` + +const ServiceErrorCodeResourceExhausted ServiceErrorCode = `RESOURCE_EXHAUSTED` + +const ServiceErrorCodeServiceUnderMaintenance ServiceErrorCode = `SERVICE_UNDER_MAINTENANCE` + +const ServiceErrorCodeTemporarilyUnavailable ServiceErrorCode = `TEMPORARILY_UNAVAILABLE` + +const ServiceErrorCodeUnauthenticated ServiceErrorCode = `UNAUTHENTICATED` + +const ServiceErrorCodeUnknown ServiceErrorCode = `UNKNOWN` + +const ServiceErrorCodeWorkspaceTemporarilyUnavailable ServiceErrorCode = `WORKSPACE_TEMPORARILY_UNAVAILABLE` + +// String representation for [fmt.Print] +func (f *ServiceErrorCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServiceErrorCode) Set(v string) error { + switch v { + case `ABORTED`, `ALREADY_EXISTS`, `BAD_REQUEST`, `CANCELLED`, `DEADLINE_EXCEEDED`, `INTERNAL_ERROR`, `IO_ERROR`, `NOT_FOUND`, `RESOURCE_EXHAUSTED`, `SERVICE_UNDER_MAINTENANCE`, `TEMPORARILY_UNAVAILABLE`, `UNAUTHENTICATED`, `UNKNOWN`, `WORKSPACE_TEMPORARILY_UNAVAILABLE`: + *f = ServiceErrorCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ABORTED", "ALREADY_EXISTS", "BAD_REQUEST", "CANCELLED", "DEADLINE_EXCEEDED", "INTERNAL_ERROR", "IO_ERROR", "NOT_FOUND", "RESOURCE_EXHAUSTED", "SERVICE_UNDER_MAINTENANCE", "TEMPORARILY_UNAVAILABLE", "UNAUTHENTICATED", "UNKNOWN", "WORKSPACE_TEMPORARILY_UNAVAILABLE"`, v) + } +} + +// Type always returns ServiceErrorCode to satisfy [pflag.Value] interface +func (f *ServiceErrorCode) 
Type() string { + return "ServiceErrorCode" +} + +// Set object ACL +type SetRequest struct { + AccessControlList []AccessControl `tfsdk:"access_control_list"` + // Object ID. The ACL for the object with this UUID is overwritten by this + // request's POST content. + ObjectId types.String `tfsdk:"-" url:"-"` + // The type of object permission to set. + ObjectType ObjectTypePlural `tfsdk:"-" url:"-"` +} + +type SetResponse struct { + AccessControlList []AccessControl `tfsdk:"access_control_list"` + // An object's type and UUID, separated by a forward slash (/) character. + ObjectId types.String `tfsdk:"object_id"` + // A singular noun object type. + ObjectType ObjectType `tfsdk:"object_type"` +} + +type SetWorkspaceWarehouseConfigRequest struct { + // Optional: Channel selection details + Channel *Channel `tfsdk:"channel"` + // Deprecated: Use sql_configuration_parameters + ConfigParam *RepeatedEndpointConfPairs `tfsdk:"config_param"` + // Spark confs for external hive metastore configuration JSON serialized + // size must be less than <= 512K + DataAccessConfig []EndpointConfPair `tfsdk:"data_access_config"` + // List of Warehouse Types allowed in this workspace (limits allowed value + // of the type field in CreateWarehouse and EditWarehouse). Note: Some types + // cannot be disabled, they don't need to be specified in + // SetWorkspaceWarehouseConfig. Note: Disabling a type may cause existing + // warehouses to be converted to another type. Used by frontend to save + // specific type availability in the warehouse create and edit form UI. + EnabledWarehouseTypes []WarehouseTypePair `tfsdk:"enabled_warehouse_types"` + // Deprecated: Use sql_configuration_parameters + GlobalParam *RepeatedEndpointConfPairs `tfsdk:"global_param"` + // GCP only: Google Service Account used to pass to cluster to access Google + // Cloud Storage + GoogleServiceAccount types.String `tfsdk:"google_service_account"` + // AWS Only: Instance profile used to pass IAM role to the cluster + InstanceProfileArn types.String `tfsdk:"instance_profile_arn"` + // Security policy for warehouses + SecurityPolicy SetWorkspaceWarehouseConfigRequestSecurityPolicy `tfsdk:"security_policy"` + // SQL configuration parameters + SqlConfigurationParameters *RepeatedEndpointConfPairs `tfsdk:"sql_configuration_parameters"` +} + +// Security policy for warehouses +type SetWorkspaceWarehouseConfigRequestSecurityPolicy string + +const SetWorkspaceWarehouseConfigRequestSecurityPolicyDataAccessControl SetWorkspaceWarehouseConfigRequestSecurityPolicy = `DATA_ACCESS_CONTROL` + +const SetWorkspaceWarehouseConfigRequestSecurityPolicyNone SetWorkspaceWarehouseConfigRequestSecurityPolicy = `NONE` + +const SetWorkspaceWarehouseConfigRequestSecurityPolicyPassthrough SetWorkspaceWarehouseConfigRequestSecurityPolicy = `PASSTHROUGH` + +// String representation for [fmt.Print] +func (f *SetWorkspaceWarehouseConfigRequestSecurityPolicy) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SetWorkspaceWarehouseConfigRequestSecurityPolicy) Set(v string) error { + switch v { + case `DATA_ACCESS_CONTROL`, `NONE`, `PASSTHROUGH`: + *f = SetWorkspaceWarehouseConfigRequestSecurityPolicy(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DATA_ACCESS_CONTROL", "NONE", "PASSTHROUGH"`, v) + } +} + +// Type always returns SetWorkspaceWarehouseConfigRequestSecurityPolicy to satisfy [pflag.Value] interface +func (f *SetWorkspaceWarehouseConfigRequestSecurityPolicy) Type() string { + 
return "SetWorkspaceWarehouseConfigRequestSecurityPolicy" +} + +type SetWorkspaceWarehouseConfigResponse struct { +} + +// Configurations whether the warehouse should use spot instances. +type SpotInstancePolicy string + +const SpotInstancePolicyCostOptimized SpotInstancePolicy = `COST_OPTIMIZED` + +const SpotInstancePolicyPolicyUnspecified SpotInstancePolicy = `POLICY_UNSPECIFIED` + +const SpotInstancePolicyReliabilityOptimized SpotInstancePolicy = `RELIABILITY_OPTIMIZED` + +// String representation for [fmt.Print] +func (f *SpotInstancePolicy) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SpotInstancePolicy) Set(v string) error { + switch v { + case `COST_OPTIMIZED`, `POLICY_UNSPECIFIED`, `RELIABILITY_OPTIMIZED`: + *f = SpotInstancePolicy(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COST_OPTIMIZED", "POLICY_UNSPECIFIED", "RELIABILITY_OPTIMIZED"`, v) + } +} + +// Type always returns SpotInstancePolicy to satisfy [pflag.Value] interface +func (f *SpotInstancePolicy) Type() string { + return "SpotInstancePolicy" +} + +// Start a warehouse +type StartRequest struct { + // Required. Id of the SQL warehouse. + Id types.String `tfsdk:"-" url:"-"` +} + +type StartWarehouseResponse struct { +} + +// State of the warehouse +type State string + +const StateDeleted State = `DELETED` + +const StateDeleting State = `DELETING` + +const StateRunning State = `RUNNING` + +const StateStarting State = `STARTING` + +const StateStopped State = `STOPPED` + +const StateStopping State = `STOPPING` + +// String representation for [fmt.Print] +func (f *State) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *State) Set(v string) error { + switch v { + case `DELETED`, `DELETING`, `RUNNING`, `STARTING`, `STOPPED`, `STOPPING`: + *f = State(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETED", "DELETING", "RUNNING", "STARTING", "STOPPED", "STOPPING"`, v) + } +} + +// Type always returns State to satisfy [pflag.Value] interface +func (f *State) Type() string { + return "State" +} + +type StatementParameterListItem struct { + // The name of a parameter marker to be substituted in the statement. + Name types.String `tfsdk:"name"` + // The data type, given as a string. For example: `INT`, `STRING`, + // `DECIMAL(10,2)`. If no type is given the type is assumed to be `STRING`. + // Complex types, such as `ARRAY`, `MAP`, and `STRUCT` are not supported. + // For valid types, refer to the section [Data types] of the SQL language + // reference. + // + // [Data types]: https://docs.databricks.com/sql/language-manual/functions/cast.html + Type types.String `tfsdk:"type"` + // The value to substitute, represented as a string. If omitted, the value + // is interpreted as NULL. 
+	Value types.String `tfsdk:"value"`
+}
+
+// Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`:
+// running - `SUCCEEDED`: execution was successful, result data available for
+// fetch - `FAILED`: execution failed; reason for failure described in
+// accompanying error message - `CANCELED`: user canceled; can come from explicit
+// cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution
+// successful, and statement closed; result no longer available for fetch
+type StatementState string
+
+// user canceled; can come from explicit cancel call, or timeout with
+// `on_wait_timeout=CANCEL`
+const StatementStateCanceled StatementState = `CANCELED`
+
+// execution successful, and statement closed; result no longer available for
+// fetch
+const StatementStateClosed StatementState = `CLOSED`
+
+// execution failed; reason for failure described in accompanying error message
+const StatementStateFailed StatementState = `FAILED`
+
+// waiting for warehouse
+const StatementStatePending StatementState = `PENDING`
+
+// running
+const StatementStateRunning StatementState = `RUNNING`
+
+// execution was successful, result data available for fetch
+const StatementStateSucceeded StatementState = `SUCCEEDED`
+
+// String representation for [fmt.Print]
+func (f *StatementState) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *StatementState) Set(v string) error {
+	switch v {
+	case `CANCELED`, `CLOSED`, `FAILED`, `PENDING`, `RUNNING`, `SUCCEEDED`:
+		*f = StatementState(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "CANCELED", "CLOSED", "FAILED", "PENDING", "RUNNING", "SUCCEEDED"`, v)
+	}
+}
+
+// Type always returns StatementState to satisfy [pflag.Value] interface
+func (f *StatementState) Type() string {
+	return "StatementState"
+}
+
+// The status response includes execution state and if relevant, error
+// information.
+type StatementStatus struct {
+	Error *ServiceError `tfsdk:"error"`
+	// Statement execution state: - `PENDING`: waiting for warehouse -
+	// `RUNNING`: running - `SUCCEEDED`: execution was successful, result data
+	// available for fetch - `FAILED`: execution failed; reason for failure
+	// described in accompanying error message - `CANCELED`: user canceled; can
+	// come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL`
+	// - `CLOSED`: execution successful, and statement closed; result no longer
+	// available for fetch
+	State StatementState `tfsdk:"state"`
+}
+
+// Health status of the warehouse.
+type Status string
+
+const StatusDegraded Status = `DEGRADED`
+
+const StatusFailed Status = `FAILED`
+
+const StatusHealthy Status = `HEALTHY`
+
+const StatusStatusUnspecified Status = `STATUS_UNSPECIFIED`
+
+// String representation for [fmt.Print]
+func (f *Status) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *Status) Set(v string) error {
+	switch v {
+	case `DEGRADED`, `FAILED`, `HEALTHY`, `STATUS_UNSPECIFIED`:
+		*f = Status(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "DEGRADED", "FAILED", "HEALTHY", "STATUS_UNSPECIFIED"`, v)
+	}
+}
+
+// Type always returns Status to satisfy [pflag.Value] interface
+func (f *Status) Type() string {
+	return "Status"
+}
+
+// Stop a warehouse
+type StopRequest struct {
+	// Required. Id of the SQL warehouse.
+ Id types.String `tfsdk:"-" url:"-"` +} + +type StopWarehouseResponse struct { +} + +type Success struct { + Message SuccessMessage `tfsdk:"message"` +} + +type SuccessMessage string + +const SuccessMessageSuccess SuccessMessage = `Success` + +// String representation for [fmt.Print] +func (f *SuccessMessage) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SuccessMessage) Set(v string) error { + switch v { + case `Success`: + *f = SuccessMessage(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "Success"`, v) + } +} + +// Type always returns SuccessMessage to satisfy [pflag.Value] interface +func (f *SuccessMessage) Type() string { + return "SuccessMessage" +} + +type TerminationReason struct { + // status code indicating why the cluster was terminated + Code TerminationReasonCode `tfsdk:"code"` + // list of parameters that provide additional information about why the + // cluster was terminated + Parameters map[string]types.String `tfsdk:"parameters"` + // type of the termination + Type TerminationReasonType `tfsdk:"type"` +} + +// status code indicating why the cluster was terminated +type TerminationReasonCode string + +const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED` + +const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE` + +const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE` + +const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE` + +const TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE` + +const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE` + +const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED` + +const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE` + +const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE` + +const TerminationReasonCodeAzureEphemeralDiskFailure TerminationReasonCode = `AZURE_EPHEMERAL_DISK_FAILURE` + +const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode = `AZURE_INVALID_DEPLOYMENT_TEMPLATE` + +const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION` + +const TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = `AZURE_QUOTA_EXCEEDED_EXCEPTION` + +const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING` + +const TerminationReasonCodeAzureResourceProviderThrottling TerminationReasonCode = `AZURE_RESOURCE_PROVIDER_THROTTLING` + +const TerminationReasonCodeAzureUnexpectedDeploymentTemplateFailure TerminationReasonCode = `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE` + +const TerminationReasonCodeAzureVmExtensionFailure TerminationReasonCode = `AZURE_VM_EXTENSION_FAILURE` + +const TerminationReasonCodeAzureVnetConfigurationFailure TerminationReasonCode = `AZURE_VNET_CONFIGURATION_FAILURE` + +const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_TIMEOUT` + +const TerminationReasonCodeBootstrapTimeoutCloudProviderException TerminationReasonCode = 
`BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION` + +const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE` + +const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE` + +const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT` + +const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN` + +const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST` + +const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE` + +const TerminationReasonCodeControlPlaneRequestFailure TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE` + +const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE` + +const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` + +const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE` + +const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE` + +const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED` + +const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED` + +const TerminationReasonCodeGlobalInitScriptFailure TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE` + +const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE` + +const TerminationReasonCodeImagePullPermissionDenied TerminationReasonCode = `IMAGE_PULL_PERMISSION_DENIED` + +const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY` + +const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE` + +const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE` + +const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE` + +const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR` + +const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT` + +const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE` + +const TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE` + +const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED` + +const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AUTOSCALING_FAILURE` + +const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT` + +const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY` + +const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT` + +const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE` + +const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` + +const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE` + 
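+// Editor's note, not generated output: every enum in this package, including
+// TerminationReasonCode, exposes the same String/Set/Type methods so that it
+// satisfies the [pflag.Value] interface. A minimal sketch of validating a raw
+// string before use (the chosen value is just an example):
+//
+//	var code TerminationReasonCode
+//	if err := code.Set("SPARK_ERROR"); err != nil {
+//		// the string is not one of the allowed TerminationReasonCode values
+//	}
+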
+const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE` + +const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED` + +const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED` + +const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` + +const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` + +const TerminationReasonCodeSelfBootstrapFailure TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE` + +const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES` + +const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD` + +const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR` + +const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE` + +const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE` + +const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION` + +const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE` + +const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE` + +const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE` + +const TerminationReasonCodeTemporarilyUnavailable TerminationReasonCode = `TEMPORARILY_UNAVAILABLE` + +const TerminationReasonCodeTrialExpired TerminationReasonCode = `TRIAL_EXPIRED` + +const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE` + +const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN` + +const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE` + +const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE` + +const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST` + +const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE` + +const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORKSPACE_CANCELLED_ERROR` + +const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR` + +// String representation for [fmt.Print] +func (f *TerminationReasonCode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationReasonCode) Set(v string) error { + switch v { + case `ABUSE_DETECTED`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, 
`CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_SHUTDOWN`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `DATABASE_CONNECTION_FAILURE`, `DBFS_COMPONENT_UNHEALTHY`, `DOCKER_IMAGE_PULL_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `EXECUTION_COMPONENT_UNHEALTHY`, `GCP_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_UNREACHABLE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_SPARK_IMAGE`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `STORAGE_DOWNLOAD_FAILURE`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`: + *f = TerminationReasonCode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_SHUTDOWN", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "DATABASE_CONNECTION_FAILURE", "DBFS_COMPONENT_UNHEALTHY", "DOCKER_IMAGE_PULL_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "EXECUTION_COMPONENT_UNHEALTHY", "GCP_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_DELETED", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_UNREACHABLE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_SPARK_IMAGE", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_STARTUP_FAILURE", 
"SPOT_INSTANCE_TERMINATION", "STORAGE_DOWNLOAD_FAILURE", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR"`, v) + } +} + +// Type always returns TerminationReasonCode to satisfy [pflag.Value] interface +func (f *TerminationReasonCode) Type() string { + return "TerminationReasonCode" +} + +// type of the termination +type TerminationReasonType string + +const TerminationReasonTypeClientError TerminationReasonType = `CLIENT_ERROR` + +const TerminationReasonTypeCloudFailure TerminationReasonType = `CLOUD_FAILURE` + +const TerminationReasonTypeServiceFault TerminationReasonType = `SERVICE_FAULT` + +const TerminationReasonTypeSuccess TerminationReasonType = `SUCCESS` + +// String representation for [fmt.Print] +func (f *TerminationReasonType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TerminationReasonType) Set(v string) error { + switch v { + case `CLIENT_ERROR`, `CLOUD_FAILURE`, `SERVICE_FAULT`, `SUCCESS`: + *f = TerminationReasonType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLIENT_ERROR", "CLOUD_FAILURE", "SERVICE_FAULT", "SUCCESS"`, v) + } +} + +// Type always returns TerminationReasonType to satisfy [pflag.Value] interface +func (f *TerminationReasonType) Type() string { + return "TerminationReasonType" +} + +type TimeRange struct { + // Limit results to queries that started before this time. + EndTimeMs types.Int64 `tfsdk:"end_time_ms" url:"end_time_ms,omitempty"` + // Limit results to queries that started after this time. + StartTimeMs types.Int64 `tfsdk:"start_time_ms" url:"start_time_ms,omitempty"` +} + +type TransferOwnershipObjectId struct { + // Email address for the new owner, who must exist in the workspace. + NewOwner types.String `tfsdk:"new_owner"` +} + +// Transfer object ownership +type TransferOwnershipRequest struct { + // Email address for the new owner, who must exist in the workspace. + NewOwner types.String `tfsdk:"new_owner"` + // The ID of the object on which to change ownership. + ObjectId TransferOwnershipObjectId `tfsdk:"-" url:"-"` + // The type of object on which to change ownership. + ObjectType OwnableObjectType `tfsdk:"-" url:"-"` +} + +type UpdateResponse struct { +} + +type User struct { + Email types.String `tfsdk:"email"` + + Id types.Int64 `tfsdk:"id"` + + Name types.String `tfsdk:"name"` +} + +// The visualization description API changes frequently and is unsupported. You +// can duplicate a visualization by copying description objects received _from +// the API_ and then using them to create a new one with a POST request to the +// same endpoint. Databricks does not recommend constructing ad-hoc +// visualizations entirely in JSON. +type Visualization struct { + CreatedAt types.String `tfsdk:"created_at"` + // A short description of this visualization. This is not displayed in the + // UI. + Description types.String `tfsdk:"description"` + // The UUID for this visualization. + Id types.String `tfsdk:"id"` + // The name of the visualization that appears on dashboards and the query + // screen. + Name types.String `tfsdk:"name"` + // The options object varies widely from one visualization type to the next + // and is unsupported. 
Databricks does not recommend modifying visualization + // settings in JSON. + Options any `tfsdk:"options"` + + Query *Query `tfsdk:"query"` + // The type of visualization: chart, table, pivot table, and so on. + Type types.String `tfsdk:"type"` + + UpdatedAt types.String `tfsdk:"updated_at"` +} + +type WarehouseAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel WarehousePermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type WarehouseAccessControlResponse struct { + // All permissions. + AllPermissions []WarehousePermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type WarehousePermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel WarehousePermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type WarehousePermissionLevel string + +const WarehousePermissionLevelCanManage WarehousePermissionLevel = `CAN_MANAGE` + +const WarehousePermissionLevelCanUse WarehousePermissionLevel = `CAN_USE` + +const WarehousePermissionLevelIsOwner WarehousePermissionLevel = `IS_OWNER` + +// String representation for [fmt.Print] +func (f *WarehousePermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WarehousePermissionLevel) Set(v string) error { + switch v { + case `CAN_MANAGE`, `CAN_USE`, `IS_OWNER`: + *f = WarehousePermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_USE", "IS_OWNER"`, v) + } +} + +// Type always returns WarehousePermissionLevel to satisfy [pflag.Value] interface +func (f *WarehousePermissionLevel) Type() string { + return "WarehousePermissionLevel" +} + +type WarehousePermissions struct { + AccessControlList []WarehouseAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type WarehousePermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel WarehousePermissionLevel `tfsdk:"permission_level"` +} + +type WarehousePermissionsRequest struct { + AccessControlList []WarehouseAccessControlRequest `tfsdk:"access_control_list"` + // The SQL warehouse for which to get or manage permissions. + WarehouseId types.String `tfsdk:"-" url:"-"` +} + +type WarehouseTypePair struct { + // If set to false the specific warehouse type will not be be allowed as a + // value for warehouse_type in CreateWarehouse and EditWarehouse + Enabled types.Bool `tfsdk:"enabled"` + // Warehouse type: `PRO` or `CLASSIC`. + WarehouseType WarehouseTypePairWarehouseType `tfsdk:"warehouse_type"` +} + +// Warehouse type: `PRO` or `CLASSIC`. 
+type WarehouseTypePairWarehouseType string + +const WarehouseTypePairWarehouseTypeClassic WarehouseTypePairWarehouseType = `CLASSIC` + +const WarehouseTypePairWarehouseTypePro WarehouseTypePairWarehouseType = `PRO` + +const WarehouseTypePairWarehouseTypeTypeUnspecified WarehouseTypePairWarehouseType = `TYPE_UNSPECIFIED` + +// String representation for [fmt.Print] +func (f *WarehouseTypePairWarehouseType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WarehouseTypePairWarehouseType) Set(v string) error { + switch v { + case `CLASSIC`, `PRO`, `TYPE_UNSPECIFIED`: + *f = WarehouseTypePairWarehouseType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CLASSIC", "PRO", "TYPE_UNSPECIFIED"`, v) + } +} + +// Type always returns WarehouseTypePairWarehouseType to satisfy [pflag.Value] interface +func (f *WarehouseTypePairWarehouseType) Type() string { + return "WarehouseTypePairWarehouseType" +} + +type Widget struct { + // The unique ID for this widget. + Id types.String `tfsdk:"id"` + + Options *WidgetOptions `tfsdk:"options"` + // The visualization description API changes frequently and is unsupported. + // You can duplicate a visualization by copying description objects received + // _from the API_ and then using them to create a new one with a POST + // request to the same endpoint. Databricks does not recommend constructing + // ad-hoc visualizations entirely in JSON. + Visualization *Visualization `tfsdk:"visualization"` + // Unused field. + Width types.Int64 `tfsdk:"width"` +} + +type WidgetOptions struct { + // Timestamp when this object was created + CreatedAt types.String `tfsdk:"created_at"` + // Custom description of the widget + Description types.String `tfsdk:"description"` + // Whether this widget is hidden on the dashboard. + IsHidden types.Bool `tfsdk:"isHidden"` + // How parameters used by the visualization in this widget relate to other + // widgets on the dashboard. Databricks does not recommend modifying this + // definition in JSON. + ParameterMappings any `tfsdk:"parameterMappings"` + // Coordinates of this widget on a dashboard. This portion of the API + // changes frequently and is unsupported. + Position *WidgetPosition `tfsdk:"position"` + // Custom title of the widget + Title types.String `tfsdk:"title"` + // Timestamp of the last time this object was updated. + UpdatedAt types.String `tfsdk:"updated_at"` +} + +// Coordinates of this widget on a dashboard. This portion of the API changes +// frequently and is unsupported. +type WidgetPosition struct { + // reserved for internal use + AutoHeight types.Bool `tfsdk:"autoHeight"` + // column in the dashboard grid. Values start with 0 + Col types.Int64 `tfsdk:"col"` + // row in the dashboard grid. Values start with 0 + Row types.Int64 `tfsdk:"row"` + // width of the widget measured in dashboard grid cells + SizeX types.Int64 `tfsdk:"sizeX"` + // height of the widget measured in dashboard grid cells + SizeY types.Int64 `tfsdk:"sizeY"` +} diff --git a/service/vectorsearch_tf/model.go b/service/vectorsearch_tf/model.go new file mode 100755 index 0000000000..d9aa34652e --- /dev/null +++ b/service/vectorsearch_tf/model.go @@ -0,0 +1,641 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. 
+The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package vectorsearch_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ColumnInfo struct { + // Name of the column. + Name types.String `tfsdk:"name"` +} + +type CreateEndpoint struct { + // Type of endpoint. + EndpointType EndpointType `tfsdk:"endpoint_type"` + // Name of endpoint + Name types.String `tfsdk:"name"` +} + +type CreateVectorIndexRequest struct { + // Specification for Delta Sync Index. Required if `index_type` is + // `DELTA_SYNC`. + DeltaSyncIndexSpec *DeltaSyncVectorIndexSpecRequest `tfsdk:"delta_sync_index_spec"` + // Specification for Direct Vector Access Index. Required if `index_type` is + // `DIRECT_ACCESS`. + DirectAccessIndexSpec *DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec"` + // Name of the endpoint to be used for serving the index + EndpointName types.String `tfsdk:"endpoint_name"` + // There are 2 types of Vector Search indexes: + // + // - `DELTA_SYNC`: An index that automatically syncs with a source Delta + // Table, automatically and incrementally updating the index as the + // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index + // that supports direct read and write of vectors and metadata through our + // REST and SDK APIs. With this model, the user manages index updates. + IndexType VectorIndexType `tfsdk:"index_type"` + // Name of the index + Name types.String `tfsdk:"name"` + // Primary key of the index + PrimaryKey types.String `tfsdk:"primary_key"` +} + +type CreateVectorIndexResponse struct { + VectorIndex *VectorIndex `tfsdk:"vector_index"` +} + +// Result of the upsert or delete operation. +type DeleteDataResult struct { + // List of primary keys for rows that failed to process. + FailedPrimaryKeys []types.String `tfsdk:"failed_primary_keys"` + // Count of successfully processed rows. + SuccessRowCount types.Int64 `tfsdk:"success_row_count"` +} + +// Status of the delete operation. +type DeleteDataStatus string + +const DeleteDataStatusFailure DeleteDataStatus = `FAILURE` + +const DeleteDataStatusPartialSuccess DeleteDataStatus = `PARTIAL_SUCCESS` + +const DeleteDataStatusSuccess DeleteDataStatus = `SUCCESS` + +// String representation for [fmt.Print] +func (f *DeleteDataStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DeleteDataStatus) Set(v string) error { + switch v { + case `FAILURE`, `PARTIAL_SUCCESS`, `SUCCESS`: + *f = DeleteDataStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILURE", "PARTIAL_SUCCESS", "SUCCESS"`, v) + } +} + +// Type always returns DeleteDataStatus to satisfy [pflag.Value] interface +func (f *DeleteDataStatus) Type() string { + return "DeleteDataStatus" +} + +// Request payload for deleting data from a vector index. +type DeleteDataVectorIndexRequest struct { + // Name of the vector index where data is to be deleted. Must be a Direct + // Vector Access Index. + IndexName types.String `tfsdk:"-" url:"-"` + // List of primary keys for the data to be deleted. + PrimaryKeys []types.String `tfsdk:"primary_keys"` +} + +// Response to a delete data vector index request. 
+type DeleteDataVectorIndexResponse struct { + // Result of the upsert or delete operation. + Result *DeleteDataResult `tfsdk:"result"` + // Status of the delete operation. + Status DeleteDataStatus `tfsdk:"status"` +} + +// Delete an endpoint +type DeleteEndpointRequest struct { + // Name of the endpoint + EndpointName types.String `tfsdk:"-" url:"-"` +} + +type DeleteEndpointResponse struct { +} + +// Delete an index +type DeleteIndexRequest struct { + // Name of the index + IndexName types.String `tfsdk:"-" url:"-"` +} + +type DeleteIndexResponse struct { +} + +type DeltaSyncVectorIndexSpecRequest struct { + // The columns that contain the embedding source. + EmbeddingSourceColumns []EmbeddingSourceColumn `tfsdk:"embedding_source_columns"` + // The columns that contain the embedding vectors. The format should be + // array[double]. + EmbeddingVectorColumns []EmbeddingVectorColumn `tfsdk:"embedding_vector_columns"` + // [Optional] Automatically sync the vector index contents and computed + // embeddings to the specified Delta table. The only supported table name is + // the index name with the suffix `_writeback_table`. + EmbeddingWritebackTable types.String `tfsdk:"embedding_writeback_table"` + // Pipeline execution mode. + // + // - `TRIGGERED`: If the pipeline uses the triggered execution mode, the + // system stops processing after successfully refreshing the source table in + // the pipeline once, ensuring the table is updated based on the data + // available when the update started. - `CONTINUOUS`: If the pipeline uses + // continuous execution, the pipeline processes new data as it arrives in + // the source table to keep vector index fresh. + PipelineType PipelineType `tfsdk:"pipeline_type"` + // The name of the source table. + SourceTable types.String `tfsdk:"source_table"` +} + +type DeltaSyncVectorIndexSpecResponse struct { + // The columns that contain the embedding source. + EmbeddingSourceColumns []EmbeddingSourceColumn `tfsdk:"embedding_source_columns"` + // The columns that contain the embedding vectors. + EmbeddingVectorColumns []EmbeddingVectorColumn `tfsdk:"embedding_vector_columns"` + // [Optional] Name of the Delta table to sync the vector index contents and + // computed embeddings to. + EmbeddingWritebackTable types.String `tfsdk:"embedding_writeback_table"` + // The ID of the pipeline that is used to sync the index. + PipelineId types.String `tfsdk:"pipeline_id"` + // Pipeline execution mode. + // + // - `TRIGGERED`: If the pipeline uses the triggered execution mode, the + // system stops processing after successfully refreshing the source table in + // the pipeline once, ensuring the table is updated based on the data + // available when the update started. - `CONTINUOUS`: If the pipeline uses + // continuous execution, the pipeline processes new data as it arrives in + // the source table to keep vector index fresh. + PipelineType PipelineType `tfsdk:"pipeline_type"` + // The name of the source table. + SourceTable types.String `tfsdk:"source_table"` +} + +type DirectAccessVectorIndexSpec struct { + // Contains the optional model endpoint to use during query time. + EmbeddingSourceColumns []EmbeddingSourceColumn `tfsdk:"embedding_source_columns"` + + EmbeddingVectorColumns []EmbeddingVectorColumn `tfsdk:"embedding_vector_columns"` + // The schema of the index in JSON format. + // + // Supported types are `integer`, `long`, `float`, `double`, `boolean`, + // `string`, `date`, `timestamp`. + // + // Supported types for vector column: `array`, `array`,`. 
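+	//
+	// A minimal illustration (editor's sketch; the column names and the JSON
+	// shape are assumptions, not taken from this spec) of populating this
+	// field:
+	//
+	//	spec := DirectAccessVectorIndexSpec{
+	//		SchemaJson: types.StringValue(`{"id": "long", "text": "string", "text_vector": "array<float>"}`),
+	//	}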
+ SchemaJson types.String `tfsdk:"schema_json"` +} + +type EmbeddingSourceColumn struct { + // Name of the embedding model endpoint + EmbeddingModelEndpointName types.String `tfsdk:"embedding_model_endpoint_name"` + // Name of the column + Name types.String `tfsdk:"name"` +} + +type EmbeddingVectorColumn struct { + // Dimension of the embedding vector + EmbeddingDimension types.Int64 `tfsdk:"embedding_dimension"` + // Name of the column + Name types.String `tfsdk:"name"` +} + +type EndpointInfo struct { + // Timestamp of endpoint creation + CreationTimestamp types.Int64 `tfsdk:"creation_timestamp"` + // Creator of the endpoint + Creator types.String `tfsdk:"creator"` + // Current status of the endpoint + EndpointStatus *EndpointStatus `tfsdk:"endpoint_status"` + // Type of endpoint. + EndpointType EndpointType `tfsdk:"endpoint_type"` + // Unique identifier of the endpoint + Id types.String `tfsdk:"id"` + // Timestamp of last update to the endpoint + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` + // User who last updated the endpoint + LastUpdatedUser types.String `tfsdk:"last_updated_user"` + // Name of endpoint + Name types.String `tfsdk:"name"` + // Number of indexes on the endpoint + NumIndexes types.Int64 `tfsdk:"num_indexes"` +} + +// Status information of an endpoint +type EndpointStatus struct { + // Additional status message + Message types.String `tfsdk:"message"` + // Current state of the endpoint + State EndpointStatusState `tfsdk:"state"` +} + +// Current state of the endpoint +type EndpointStatusState string + +const EndpointStatusStateOffline EndpointStatusState = `OFFLINE` + +const EndpointStatusStateOnline EndpointStatusState = `ONLINE` + +const EndpointStatusStateProvisioning EndpointStatusState = `PROVISIONING` + +// String representation for [fmt.Print] +func (f *EndpointStatusState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointStatusState) Set(v string) error { + switch v { + case `OFFLINE`, `ONLINE`, `PROVISIONING`: + *f = EndpointStatusState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OFFLINE", "ONLINE", "PROVISIONING"`, v) + } +} + +// Type always returns EndpointStatusState to satisfy [pflag.Value] interface +func (f *EndpointStatusState) Type() string { + return "EndpointStatusState" +} + +// Type of endpoint. +type EndpointType string + +const EndpointTypeStandard EndpointType = `STANDARD` + +// String representation for [fmt.Print] +func (f *EndpointType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EndpointType) Set(v string) error { + switch v { + case `STANDARD`: + *f = EndpointType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "STANDARD"`, v) + } +} + +// Type always returns EndpointType to satisfy [pflag.Value] interface +func (f *EndpointType) Type() string { + return "EndpointType" +} + +// Get an endpoint +type GetEndpointRequest struct { + // Name of the endpoint + EndpointName types.String `tfsdk:"-" url:"-"` +} + +// Get an index +type GetIndexRequest struct { + // Name of the index + IndexName types.String `tfsdk:"-" url:"-"` +} + +type ListEndpointResponse struct { + // An array of Endpoint objects + Endpoints []EndpointInfo `tfsdk:"endpoints"` + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. 
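+	//
+	// A rough pagination sketch (editor's illustration; `fetchPage` is a
+	// hypothetical caller-side helper, not an API in this package):
+	//
+	//	resp := fetchPage(types.StringValue(""))
+	//	for !resp.NextPageToken.IsNull() && resp.NextPageToken.ValueString() != "" {
+	//		resp = fetchPage(resp.NextPageToken)
+	//	}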
+ NextPageToken types.String `tfsdk:"next_page_token"` +} + +// List all endpoints +type ListEndpointsRequest struct { + // Token for pagination + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +// List indexes +type ListIndexesRequest struct { + // Name of the endpoint + EndpointName types.String `tfsdk:"-" url:"endpoint_name"` + // Token for pagination + PageToken types.String `tfsdk:"-" url:"page_token,omitempty"` +} + +type ListValue struct { + Values []Value `tfsdk:"values"` +} + +type ListVectorIndexesResponse struct { + // A token that can be used to get the next page of results. If not present, + // there are no more results to show. + NextPageToken types.String `tfsdk:"next_page_token"` + + VectorIndexes []MiniVectorIndex `tfsdk:"vector_indexes"` +} + +// Key-value pair. +type MapStringValueEntry struct { + // Column name. + Key types.String `tfsdk:"key"` + // Column value, nullable. + Value *Value `tfsdk:"value"` +} + +type MiniVectorIndex struct { + // The user who created the index. + Creator types.String `tfsdk:"creator"` + // Name of the endpoint associated with the index + EndpointName types.String `tfsdk:"endpoint_name"` + // There are 2 types of Vector Search indexes: + // + // - `DELTA_SYNC`: An index that automatically syncs with a source Delta + // Table, automatically and incrementally updating the index as the + // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index + // that supports direct read and write of vectors and metadata through our + // REST and SDK APIs. With this model, the user manages index updates. + IndexType VectorIndexType `tfsdk:"index_type"` + // Name of the index + Name types.String `tfsdk:"name"` + // Primary key of the index + PrimaryKey types.String `tfsdk:"primary_key"` +} + +// Pipeline execution mode. +// +// - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system +// stops processing after successfully refreshing the source table in the +// pipeline once, ensuring the table is updated based on the data available when +// the update started. - `CONTINUOUS`: If the pipeline uses continuous +// execution, the pipeline processes new data as it arrives in the source table +// to keep vector index fresh. +type PipelineType string + +// If the pipeline uses continuous execution, the pipeline processes new data as +// it arrives in the source table to keep vector index fresh. +const PipelineTypeContinuous PipelineType = `CONTINUOUS` + +// If the pipeline uses the triggered execution mode, the system stops +// processing after successfully refreshing the source table in the pipeline +// once, ensuring the table is updated based on the data available when the +// update started. +const PipelineTypeTriggered PipelineType = `TRIGGERED` + +// String representation for [fmt.Print] +func (f *PipelineType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PipelineType) Set(v string) error { + switch v { + case `CONTINUOUS`, `TRIGGERED`: + *f = PipelineType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CONTINUOUS", "TRIGGERED"`, v) + } +} + +// Type always returns PipelineType to satisfy [pflag.Value] interface +func (f *PipelineType) Type() string { + return "PipelineType" +} + +// Request payload for getting next page of results. +type QueryVectorIndexNextPageRequest struct { + // Name of the endpoint. + EndpointName types.String `tfsdk:"endpoint_name"` + // Name of the vector index to query. 
+ IndexName types.String `tfsdk:"-" url:"-"` + // Page token returned from previous `QueryVectorIndex` or + // `QueryVectorIndexNextPage` API. + PageToken types.String `tfsdk:"page_token"` +} + +type QueryVectorIndexRequest struct { + // List of column names to include in the response. + Columns []types.String `tfsdk:"columns"` + // JSON string representing query filters. + // + // Example filters: - `{"id <": 5}`: Filter for id less than 5. - `{"id >": + // 5}`: Filter for id greater than 5. - `{"id <=": 5}`: Filter for id less + // than equal to 5. - `{"id >=": 5}`: Filter for id greater than equal to 5. + // - `{"id": 5}`: Filter for id equal to 5. + FiltersJson types.String `tfsdk:"filters_json"` + // Name of the vector index to query. + IndexName types.String `tfsdk:"-" url:"-"` + // Number of results to return. Defaults to 10. + NumResults types.Int64 `tfsdk:"num_results"` + // Query text. Required for Delta Sync Index using model endpoint. + QueryText types.String `tfsdk:"query_text"` + // The query type to use. Choices are `ANN` and `HYBRID`. Defaults to `ANN`. + QueryType types.String `tfsdk:"query_type"` + // Query vector. Required for Direct Vector Access Index and Delta Sync + // Index using self-managed vectors. + QueryVector []types.Float64 `tfsdk:"query_vector"` + // Threshold for the approximate nearest neighbor search. Defaults to 0.0. + ScoreThreshold types.Float64 `tfsdk:"score_threshold"` +} + +type QueryVectorIndexResponse struct { + // Metadata about the result set. + Manifest *ResultManifest `tfsdk:"manifest"` + // [Optional] Token that can be used in `QueryVectorIndexNextPage` API to + // get next page of results. If more than 1000 results satisfy the query, + // they are returned in groups of 1000. Empty value means no more results. + NextPageToken types.String `tfsdk:"next_page_token"` + // Data returned in the query result. + Result *ResultData `tfsdk:"result"` +} + +// Data returned in the query result. +type ResultData struct { + // Data rows returned in the query. + DataArray [][]types.String `tfsdk:"data_array"` + // Number of rows in the result set. + RowCount types.Int64 `tfsdk:"row_count"` +} + +// Metadata about the result set. +type ResultManifest struct { + // Number of columns in the result set. + ColumnCount types.Int64 `tfsdk:"column_count"` + // Information about each column in the result set. + Columns []ColumnInfo `tfsdk:"columns"` +} + +// Request payload for scanning data from a vector index. +type ScanVectorIndexRequest struct { + // Name of the vector index to scan. + IndexName types.String `tfsdk:"-" url:"-"` + // Primary key of the last entry returned in the previous scan. + LastPrimaryKey types.String `tfsdk:"last_primary_key"` + // Number of results to return. Defaults to 10. + NumResults types.Int64 `tfsdk:"num_results"` +} + +// Response to a scan vector index request. +type ScanVectorIndexResponse struct { + // List of data entries + Data []Struct `tfsdk:"data"` + // Primary key of the last entry. + LastPrimaryKey types.String `tfsdk:"last_primary_key"` +} + +type Struct struct { + // Data entry, corresponding to a row in a vector index. + Fields []MapStringValueEntry `tfsdk:"fields"` +} + +// Synchronize an index +type SyncIndexRequest struct { + // Name of the vector index to synchronize. Must be a Delta Sync Index. + IndexName types.String `tfsdk:"-" url:"-"` +} + +type SyncIndexResponse struct { +} + +// Result of the upsert or delete operation. 
+type UpsertDataResult struct { + // List of primary keys for rows that failed to process. + FailedPrimaryKeys []types.String `tfsdk:"failed_primary_keys"` + // Count of successfully processed rows. + SuccessRowCount types.Int64 `tfsdk:"success_row_count"` +} + +// Status of the upsert operation. +type UpsertDataStatus string + +const UpsertDataStatusFailure UpsertDataStatus = `FAILURE` + +const UpsertDataStatusPartialSuccess UpsertDataStatus = `PARTIAL_SUCCESS` + +const UpsertDataStatusSuccess UpsertDataStatus = `SUCCESS` + +// String representation for [fmt.Print] +func (f *UpsertDataStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *UpsertDataStatus) Set(v string) error { + switch v { + case `FAILURE`, `PARTIAL_SUCCESS`, `SUCCESS`: + *f = UpsertDataStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FAILURE", "PARTIAL_SUCCESS", "SUCCESS"`, v) + } +} + +// Type always returns UpsertDataStatus to satisfy [pflag.Value] interface +func (f *UpsertDataStatus) Type() string { + return "UpsertDataStatus" +} + +// Request payload for upserting data into a vector index. +type UpsertDataVectorIndexRequest struct { + // Name of the vector index where data is to be upserted. Must be a Direct + // Vector Access Index. + IndexName types.String `tfsdk:"-" url:"-"` + // JSON string representing the data to be upserted. + InputsJson types.String `tfsdk:"inputs_json"` +} + +// Response to an upsert data vector index request. +type UpsertDataVectorIndexResponse struct { + // Result of the upsert or delete operation. + Result *UpsertDataResult `tfsdk:"result"` + // Status of the upsert operation. + Status UpsertDataStatus `tfsdk:"status"` +} + +type Value struct { + BoolValue types.Bool `tfsdk:"bool_value"` + + ListValue *ListValue `tfsdk:"list_value"` + + NullValue types.String `tfsdk:"null_value"` + + NumberValue types.Float64 `tfsdk:"number_value"` + + StringValue types.String `tfsdk:"string_value"` + + StructValue *Struct `tfsdk:"struct_value"` +} + +type VectorIndex struct { + // The user who created the index. + Creator types.String `tfsdk:"creator"` + + DeltaSyncIndexSpec *DeltaSyncVectorIndexSpecResponse `tfsdk:"delta_sync_index_spec"` + + DirectAccessIndexSpec *DirectAccessVectorIndexSpec `tfsdk:"direct_access_index_spec"` + // Name of the endpoint associated with the index + EndpointName types.String `tfsdk:"endpoint_name"` + // There are 2 types of Vector Search indexes: + // + // - `DELTA_SYNC`: An index that automatically syncs with a source Delta + // Table, automatically and incrementally updating the index as the + // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index + // that supports direct read and write of vectors and metadata through our + // REST and SDK APIs. With this model, the user manages index updates. 
+ IndexType VectorIndexType `tfsdk:"index_type"` + // Name of the index + Name types.String `tfsdk:"name"` + // Primary key of the index + PrimaryKey types.String `tfsdk:"primary_key"` + + Status *VectorIndexStatus `tfsdk:"status"` +} + +type VectorIndexStatus struct { + // Index API Url to be used to perform operations on the index + IndexUrl types.String `tfsdk:"index_url"` + // Number of rows indexed + IndexedRowCount types.Int64 `tfsdk:"indexed_row_count"` + // Message associated with the index status + Message types.String `tfsdk:"message"` + // Whether the index is ready for search + Ready types.Bool `tfsdk:"ready"` +} + +// There are 2 types of Vector Search indexes: +// +// - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, +// automatically and incrementally updating the index as the underlying data in +// the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct +// read and write of vectors and metadata through our REST and SDK APIs. With +// this model, the user manages index updates. +type VectorIndexType string + +// An index that automatically syncs with a source Delta Table, automatically +// and incrementally updating the index as the underlying data in the Delta +// Table changes. +const VectorIndexTypeDeltaSync VectorIndexType = `DELTA_SYNC` + +// An index that supports direct read and write of vectors and metadata through +// our REST and SDK APIs. With this model, the user manages index updates. +const VectorIndexTypeDirectAccess VectorIndexType = `DIRECT_ACCESS` + +// String representation for [fmt.Print] +func (f *VectorIndexType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *VectorIndexType) Set(v string) error { + switch v { + case `DELTA_SYNC`, `DIRECT_ACCESS`: + *f = VectorIndexType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELTA_SYNC", "DIRECT_ACCESS"`, v) + } +} + +// Type always returns VectorIndexType to satisfy [pflag.Value] interface +func (f *VectorIndexType) Type() string { + return "VectorIndexType" +} diff --git a/service/workspace_tf/model.go b/service/workspace_tf/model.go new file mode 100755 index 0000000000..1e7a8257a5 --- /dev/null +++ b/service/workspace_tf/model.go @@ -0,0 +1,931 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package workspace_tf + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AclItem struct { + // The permission level applied to the principal. + Permission AclPermission `tfsdk:"permission"` + // The principal in which the permission is applied. 
+ Principal types.String `tfsdk:"principal"` +} + +type AclPermission string + +const AclPermissionManage AclPermission = `MANAGE` + +const AclPermissionRead AclPermission = `READ` + +const AclPermissionWrite AclPermission = `WRITE` + +// String representation for [fmt.Print] +func (f *AclPermission) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AclPermission) Set(v string) error { + switch v { + case `MANAGE`, `READ`, `WRITE`: + *f = AclPermission(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "MANAGE", "READ", "WRITE"`, v) + } +} + +// Type always returns AclPermission to satisfy [pflag.Value] interface +func (f *AclPermission) Type() string { + return "AclPermission" +} + +type AzureKeyVaultSecretScopeMetadata struct { + // The DNS of the KeyVault + DnsName types.String `tfsdk:"dns_name"` + // The resource id of the azure KeyVault that user wants to associate the + // scope with. + ResourceId types.String `tfsdk:"resource_id"` +} + +type CreateCredentials struct { + // Git provider. This field is case-insensitive. The available Git providers + // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, + // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and + // awsCodeCommit. + GitProvider types.String `tfsdk:"git_provider"` + // The username or email provided with your Git provider account, depending + // on which provider you are using. For GitHub, GitHub Enterprise Server, or + // Azure DevOps Services, either email or username may be used. For GitLab, + // GitLab Enterprise Edition, email must be used. For AWS CodeCommit, + // BitBucket or BitBucket Server, username must be used. For all other + // providers please see your provider's Personal Access Token authentication + // documentation to see what is supported. + GitUsername types.String `tfsdk:"git_username"` + // The personal access token used to authenticate to the corresponding Git + // provider. For certain providers, support may exist for other types of + // scoped access tokens. [Learn more]. The personal access token used to + // authenticate to the corresponding Git + // + // [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html + PersonalAccessToken types.String `tfsdk:"personal_access_token"` +} + +type CreateCredentialsResponse struct { + // ID of the credential object in the workspace. + CredentialId types.Int64 `tfsdk:"credential_id"` + // Git provider. This field is case-insensitive. The available Git providers + // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, + // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and + // awsCodeCommit. + GitProvider types.String `tfsdk:"git_provider"` + // The username or email provided with your Git provider account, depending + // on which provider you are using. For GitHub, GitHub Enterprise Server, or + // Azure DevOps Services, either email or username may be used. For GitLab, + // GitLab Enterprise Edition, email must be used. For AWS CodeCommit, + // BitBucket or BitBucket Server, username must be used. For all other + // providers please see your provider's Personal Access Token authentication + // documentation to see what is supported. + GitUsername types.String `tfsdk:"git_username"` +} + +type CreateRepo struct { + // Desired path for the repo in the workspace. Almost any path in the + // workspace can be chosen. If repo is created in /Repos, path must be in + // the format /Repos/{folder}/{repo-name}. 
+ Path types.String `tfsdk:"path"` + // Git provider. This field is case-insensitive. The available Git providers + // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, + // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and + // awsCodeCommit. + Provider types.String `tfsdk:"provider"` + // If specified, the repo will be created with sparse checkout enabled. You + // cannot enable/disable sparse checkout after the repo is created. + SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout"` + // URL of the Git repository to be linked. + Url types.String `tfsdk:"url"` +} + +type CreateScope struct { + // The metadata for the secret scope if the type is `AZURE_KEYVAULT` + BackendAzureKeyvault *AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault"` + // The principal that is initially granted `MANAGE` permission to the + // created scope. + InitialManagePrincipal types.String `tfsdk:"initial_manage_principal"` + // Scope name requested by the user. Scope names are unique. + Scope types.String `tfsdk:"scope"` + // The backend type the scope will be created with. If not specified, will + // default to `DATABRICKS` + ScopeBackendType ScopeBackendType `tfsdk:"scope_backend_type"` +} + +type CreateScopeResponse struct { +} + +type CredentialInfo struct { + // ID of the credential object in the workspace. + CredentialId types.Int64 `tfsdk:"credential_id"` + // Git provider. This field is case-insensitive. The available Git providers + // are gitHub, gitHubOAuth, bitbucketCloud, gitLab, azureDevOpsServices, + // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and + // awsCodeCommit. + GitProvider types.String `tfsdk:"git_provider"` + // The username or email provided with your Git provider account, depending + // on which provider you are using. For GitHub, GitHub Enterprise Server, or + // Azure DevOps Services, either email or username may be used. For GitLab, + // GitLab Enterprise Edition, email must be used. For AWS CodeCommit, + // BitBucket or BitBucket Server, username must be used. For all other + // providers please see your provider's Personal Access Token authentication + // documentation to see what is supported. + GitUsername types.String `tfsdk:"git_username"` +} + +type Delete struct { + // The absolute path of the notebook or directory. + Path types.String `tfsdk:"path"` + // The flag that specifies whether to delete the object recursively. It is + // `false` by default. Please note this deleting directory is not atomic. If + // it fails in the middle, some of objects under this directory may be + // deleted and cannot be undone. + Recursive types.Bool `tfsdk:"recursive"` +} + +type DeleteAcl struct { + // The principal to remove an existing ACL from. + Principal types.String `tfsdk:"principal"` + // The name of the scope to remove permissions from. + Scope types.String `tfsdk:"scope"` +} + +type DeleteAclResponse struct { +} + +// Delete a credential +type DeleteGitCredentialRequest struct { + // The ID for the corresponding credential to access. + CredentialId types.Int64 `tfsdk:"-" url:"-"` +} + +// Delete a repo +type DeleteRepoRequest struct { + // The ID for the corresponding repo to access. + RepoId types.Int64 `tfsdk:"-" url:"-"` +} + +type DeleteResponse struct { +} + +type DeleteScope struct { + // Name of the scope to delete. + Scope types.String `tfsdk:"scope"` +} + +type DeleteScopeResponse struct { +} + +type DeleteSecret struct { + // Name of the secret to delete. 
+ Key types.String `tfsdk:"key"` + // The name of the scope that contains the secret to delete. + Scope types.String `tfsdk:"scope"` +} + +type DeleteSecretResponse struct { +} + +type ExportFormat string + +const ExportFormatAuto ExportFormat = `AUTO` + +const ExportFormatDbc ExportFormat = `DBC` + +const ExportFormatHtml ExportFormat = `HTML` + +const ExportFormatJupyter ExportFormat = `JUPYTER` + +const ExportFormatRMarkdown ExportFormat = `R_MARKDOWN` + +const ExportFormatSource ExportFormat = `SOURCE` + +// String representation for [fmt.Print] +func (f *ExportFormat) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExportFormat) Set(v string) error { + switch v { + case `AUTO`, `DBC`, `HTML`, `JUPYTER`, `R_MARKDOWN`, `SOURCE`: + *f = ExportFormat(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AUTO", "DBC", "HTML", "JUPYTER", "R_MARKDOWN", "SOURCE"`, v) + } +} + +// Type always returns ExportFormat to satisfy [pflag.Value] interface +func (f *ExportFormat) Type() string { + return "ExportFormat" +} + +// Export a workspace object +type ExportRequest struct { + // This specifies the format of the exported file. By default, this is + // `SOURCE`. + // + // The value is case sensitive. + // + // - `SOURCE`: The notebook is exported as source code. Directory exports + // will not include non-notebook entries. - `HTML`: The notebook is exported + // as an HTML file. - `JUPYTER`: The notebook is exported as a + // Jupyter/IPython Notebook file. - `DBC`: The notebook is exported in + // Databricks archive format. Directory exports will not include + // non-notebook entries. - `R_MARKDOWN`: The notebook is exported to R + // Markdown format. - `AUTO`: The object or directory is exported depending + // on the objects type. Directory exports will include notebooks and + // workspace files. + Format ExportFormat `tfsdk:"-" url:"format,omitempty"` + // The absolute path of the object or directory. Exporting a directory is + // only supported for the `DBC`, `SOURCE`, and `AUTO` format. + Path types.String `tfsdk:"-" url:"path"` +} + +type ExportResponse struct { + // The base64-encoded content. If the limit (10MB) is exceeded, exception + // with error code **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown. + Content types.String `tfsdk:"content"` + // The file type of the exported file. + FileType types.String `tfsdk:"file_type"` +} + +// Get secret ACL details +type GetAclRequest struct { + // The principal to fetch ACL information for. + Principal types.String `tfsdk:"-" url:"principal"` + // The name of the scope to fetch ACL information from. + Scope types.String `tfsdk:"-" url:"scope"` +} + +type GetCredentialsResponse struct { + Credentials []CredentialInfo `tfsdk:"credentials"` +} + +// Get a credential entry +type GetGitCredentialRequest struct { + // The ID for the corresponding credential to access. + CredentialId types.Int64 `tfsdk:"-" url:"-"` +} + +// Get repo permission levels +type GetRepoPermissionLevelsRequest struct { + // The repo for which to get or manage permissions. + RepoId types.String `tfsdk:"-" url:"-"` +} + +type GetRepoPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []RepoPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get repo permissions +type GetRepoPermissionsRequest struct { + // The repo for which to get or manage permissions. 
+ RepoId types.String `tfsdk:"-" url:"-"` +} + +// Get a repo +type GetRepoRequest struct { + // The ID for the corresponding repo to access. + RepoId types.Int64 `tfsdk:"-" url:"-"` +} + +// Get a secret +type GetSecretRequest struct { + // The key to fetch secret for. + Key types.String `tfsdk:"-" url:"key"` + // The name of the scope to fetch secret information from. + Scope types.String `tfsdk:"-" url:"scope"` +} + +type GetSecretResponse struct { + // A unique name to identify the secret. + Key types.String `tfsdk:"key"` + // The value of the secret in its byte representation. + Value types.String `tfsdk:"value"` +} + +// Get status +type GetStatusRequest struct { + // The absolute path of the notebook or directory. + Path types.String `tfsdk:"-" url:"path"` +} + +// Get workspace object permission levels +type GetWorkspaceObjectPermissionLevelsRequest struct { + // The workspace object for which to get or manage permissions. + WorkspaceObjectId types.String `tfsdk:"-" url:"-"` + // The workspace object type for which to get or manage permissions. + WorkspaceObjectType types.String `tfsdk:"-" url:"-"` +} + +type GetWorkspaceObjectPermissionLevelsResponse struct { + // Specific permission levels + PermissionLevels []WorkspaceObjectPermissionsDescription `tfsdk:"permission_levels"` +} + +// Get workspace object permissions +type GetWorkspaceObjectPermissionsRequest struct { + // The workspace object for which to get or manage permissions. + WorkspaceObjectId types.String `tfsdk:"-" url:"-"` + // The workspace object type for which to get or manage permissions. + WorkspaceObjectType types.String `tfsdk:"-" url:"-"` +} + +type Import struct { + // The base64-encoded content. This has a limit of 10 MB. + // + // If the limit (10MB) is exceeded, exception with error code + // **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown. This parameter might be absent, + // and instead a posted file is used. + Content types.String `tfsdk:"content"` + // This specifies the format of the file to be imported. + // + // The value is case sensitive. + // + // - `AUTO`: The item is imported depending on an analysis of the item's + // extension and the header content provided in the request. If the item is + // imported as a notebook, then the item's extension is automatically + // removed. - `SOURCE`: The notebook or directory is imported as source + // code. - `HTML`: The notebook is imported as an HTML file. - `JUPYTER`: + // The notebook is imported as a Jupyter/IPython Notebook file. - `DBC`: The + // notebook is imported in Databricks archive format. Required for + // directories. - `R_MARKDOWN`: The notebook is imported from R Markdown + // format. + Format ImportFormat `tfsdk:"format"` + // The language of the object. This value is set only if the object type is + // `NOTEBOOK`. + Language Language `tfsdk:"language"` + // The flag that specifies whether to overwrite existing object. It is + // `false` by default. For `DBC` format, `overwrite` is not supported since + // it may contain a directory. + Overwrite types.Bool `tfsdk:"overwrite"` + // The absolute path of the object or directory. Importing a directory is + // only supported for the `DBC` and `SOURCE` formats. + Path types.String `tfsdk:"path"` +} + +// This specifies the format of the file to be imported. +// +// The value is case sensitive. +// +// - `AUTO`: The item is imported depending on an analysis of the item's +// extension and the header content provided in the request. 
If the item is +// imported as a notebook, then the item's extension is automatically removed. - +// `SOURCE`: The notebook or directory is imported as source code. - `HTML`: The +// notebook is imported as an HTML file. - `JUPYTER`: The notebook is imported +// as a Jupyter/IPython Notebook file. - `DBC`: The notebook is imported in +// Databricks archive format. Required for directories. - `R_MARKDOWN`: The +// notebook is imported from R Markdown format. +type ImportFormat string + +// The item is imported depending on an analysis of the item's extension and +const ImportFormatAuto ImportFormat = `AUTO` + +// The notebook is imported in archive format. Required for +// directories. +const ImportFormatDbc ImportFormat = `DBC` + +// The notebook is imported as an HTML file. +const ImportFormatHtml ImportFormat = `HTML` + +// The notebook is imported as a Jupyter/IPython Notebook file. +const ImportFormatJupyter ImportFormat = `JUPYTER` + +// The notebook is imported from R Markdown format. +const ImportFormatRMarkdown ImportFormat = `R_MARKDOWN` + +// The notebook or directory is imported as source code. +const ImportFormatSource ImportFormat = `SOURCE` + +// String representation for [fmt.Print] +func (f *ImportFormat) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ImportFormat) Set(v string) error { + switch v { + case `AUTO`, `DBC`, `HTML`, `JUPYTER`, `R_MARKDOWN`, `SOURCE`: + *f = ImportFormat(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AUTO", "DBC", "HTML", "JUPYTER", "R_MARKDOWN", "SOURCE"`, v) + } +} + +// Type always returns ImportFormat to satisfy [pflag.Value] interface +func (f *ImportFormat) Type() string { + return "ImportFormat" +} + +type ImportResponse struct { +} + +// The language of the object. This value is set only if the object type is +// `NOTEBOOK`. +type Language string + +const LanguagePython Language = `PYTHON` + +const LanguageR Language = `R` + +const LanguageScala Language = `SCALA` + +const LanguageSql Language = `SQL` + +// String representation for [fmt.Print] +func (f *Language) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Language) Set(v string) error { + switch v { + case `PYTHON`, `R`, `SCALA`, `SQL`: + *f = Language(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PYTHON", "R", "SCALA", "SQL"`, v) + } +} + +// Type always returns Language to satisfy [pflag.Value] interface +func (f *Language) Type() string { + return "Language" +} + +// Lists ACLs +type ListAclsRequest struct { + // The name of the scope to fetch ACL information from. + Scope types.String `tfsdk:"-" url:"scope"` +} + +type ListAclsResponse struct { + // The associated ACLs rule applied to principals in the given scope. + Items []AclItem `tfsdk:"items"` +} + +// Get repos +type ListReposRequest struct { + // Token used to get the next page of results. If not specified, returns the + // first page of results as well as a next page token if there are more + // results. + NextPageToken types.String `tfsdk:"-" url:"next_page_token,omitempty"` + // Filters repos that have paths starting with the given path prefix. If not + // provided repos from /Repos will be served. + PathPrefix types.String `tfsdk:"-" url:"path_prefix,omitempty"` +} + +type ListReposResponse struct { + // Token that can be specified as a query parameter to the GET /repos + // endpoint to retrieve the next page of results. 
+ NextPageToken types.String `tfsdk:"next_page_token"` + + Repos []RepoInfo `tfsdk:"repos"` +} + +type ListResponse struct { + // List of objects. + Objects []ObjectInfo `tfsdk:"objects"` +} + +type ListScopesResponse struct { + // The available secret scopes. + Scopes []SecretScope `tfsdk:"scopes"` +} + +// List secret keys +type ListSecretsRequest struct { + // The name of the scope to list secrets within. + Scope types.String `tfsdk:"-" url:"scope"` +} + +type ListSecretsResponse struct { + // Metadata information of all secrets contained within the given scope. + Secrets []SecretMetadata `tfsdk:"secrets"` +} + +// List contents +type ListWorkspaceRequest struct { + // UTC timestamp in milliseconds + NotebooksModifiedAfter types.Int64 `tfsdk:"-" url:"notebooks_modified_after,omitempty"` + // The absolute path of the notebook or directory. + Path types.String `tfsdk:"-" url:"path"` +} + +type Mkdirs struct { + // The absolute path of the directory. If the parent directories do not + // exist, it will also create them. If the directory already exists, this + // command will do nothing and succeed. + Path types.String `tfsdk:"path"` +} + +type MkdirsResponse struct { +} + +type ObjectInfo struct { + // Only applicable to files. The creation UTC timestamp. + CreatedAt types.Int64 `tfsdk:"created_at"` + // The language of the object. This value is set only if the object type is + // `NOTEBOOK`. + Language Language `tfsdk:"language"` + // Only applicable to files, the last modified UTC timestamp. + ModifiedAt types.Int64 `tfsdk:"modified_at"` + // Unique identifier for the object. + ObjectId types.Int64 `tfsdk:"object_id"` + // The type of the object in workspace. + // + // - `NOTEBOOK`: document that contains runnable code, visualizations, and + // explanatory text. - `DIRECTORY`: directory - `LIBRARY`: library - `FILE`: + // file - `REPO`: repository - `DASHBOARD`: Lakeview dashboard + ObjectType ObjectType `tfsdk:"object_type"` + // The absolute path of the object. + Path types.String `tfsdk:"path"` + // A unique identifier for the object that is consistent across all + // Databricks APIs. + ResourceId types.String `tfsdk:"resource_id"` + // Only applicable to files. The file size in bytes can be returned. + Size types.Int64 `tfsdk:"size"` +} + +// The type of the object in workspace. +// +// - `NOTEBOOK`: document that contains runnable code, visualizations, and +// explanatory text. - `DIRECTORY`: directory - `LIBRARY`: library - `FILE`: +// file - `REPO`: repository - `DASHBOARD`: Lakeview dashboard +type ObjectType string + +// Lakeview dashboard +const ObjectTypeDashboard ObjectType = `DASHBOARD` + +// directory +const ObjectTypeDirectory ObjectType = `DIRECTORY` + +// file +const ObjectTypeFile ObjectType = `FILE` + +// library +const ObjectTypeLibrary ObjectType = `LIBRARY` + +// document that contains runnable code, visualizations, and explanatory text. 
+const ObjectTypeNotebook ObjectType = `NOTEBOOK` + +// repository +const ObjectTypeRepo ObjectType = `REPO` + +// String representation for [fmt.Print] +func (f *ObjectType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ObjectType) Set(v string) error { + switch v { + case `DASHBOARD`, `DIRECTORY`, `FILE`, `LIBRARY`, `NOTEBOOK`, `REPO`: + *f = ObjectType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DASHBOARD", "DIRECTORY", "FILE", "LIBRARY", "NOTEBOOK", "REPO"`, v) + } +} + +// Type always returns ObjectType to satisfy [pflag.Value] interface +func (f *ObjectType) Type() string { + return "ObjectType" +} + +type PutAcl struct { + // The permission level applied to the principal. + Permission AclPermission `tfsdk:"permission"` + // The principal in which the permission is applied. + Principal types.String `tfsdk:"principal"` + // The name of the scope to apply permissions to. + Scope types.String `tfsdk:"scope"` +} + +type PutAclResponse struct { +} + +type PutSecret struct { + // If specified, value will be stored as bytes. + BytesValue types.String `tfsdk:"bytes_value"` + // A unique name to identify the secret. + Key types.String `tfsdk:"key"` + // The name of the scope to which the secret will be associated with. + Scope types.String `tfsdk:"scope"` + // If specified, note that the value will be stored in UTF-8 (MB4) form. + StringValue types.String `tfsdk:"string_value"` +} + +type PutSecretResponse struct { +} + +type RepoAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel RepoPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type RepoAccessControlResponse struct { + // All permissions. + AllPermissions []RepoPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type RepoInfo struct { + // Branch that the local version of the repo is checked out to. + Branch types.String `tfsdk:"branch"` + // SHA-1 hash representing the commit ID of the current HEAD of the repo. + HeadCommitId types.String `tfsdk:"head_commit_id"` + // ID of the repo object in the workspace. + Id types.Int64 `tfsdk:"id"` + // Desired path for the repo in the workspace. Almost any path in the + // workspace can be chosen. If repo is created in /Repos, path must be in + // the format /Repos/{folder}/{repo-name}. + Path types.String `tfsdk:"path"` + // Git provider. This field is case-insensitive. The available Git providers + // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, + // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and + // awsCodeCommit. + Provider types.String `tfsdk:"provider"` + + SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout"` + // URL of the Git repository to be linked. 
+ Url types.String `tfsdk:"url"` +} + +type RepoPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel RepoPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type RepoPermissionLevel string + +const RepoPermissionLevelCanEdit RepoPermissionLevel = `CAN_EDIT` + +const RepoPermissionLevelCanManage RepoPermissionLevel = `CAN_MANAGE` + +const RepoPermissionLevelCanRead RepoPermissionLevel = `CAN_READ` + +const RepoPermissionLevelCanRun RepoPermissionLevel = `CAN_RUN` + +// String representation for [fmt.Print] +func (f *RepoPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *RepoPermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_READ`, `CAN_RUN`: + *f = RepoPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_READ", "CAN_RUN"`, v) + } +} + +// Type always returns RepoPermissionLevel to satisfy [pflag.Value] interface +func (f *RepoPermissionLevel) Type() string { + return "RepoPermissionLevel" +} + +type RepoPermissions struct { + AccessControlList []RepoAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type RepoPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel RepoPermissionLevel `tfsdk:"permission_level"` +} + +type RepoPermissionsRequest struct { + AccessControlList []RepoAccessControlRequest `tfsdk:"access_control_list"` + // The repo for which to get or manage permissions. + RepoId types.String `tfsdk:"-" url:"-"` +} + +type ScopeBackendType string + +const ScopeBackendTypeAzureKeyvault ScopeBackendType = `AZURE_KEYVAULT` + +const ScopeBackendTypeDatabricks ScopeBackendType = `DATABRICKS` + +// String representation for [fmt.Print] +func (f *ScopeBackendType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ScopeBackendType) Set(v string) error { + switch v { + case `AZURE_KEYVAULT`, `DATABRICKS`: + *f = ScopeBackendType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AZURE_KEYVAULT", "DATABRICKS"`, v) + } +} + +// Type always returns ScopeBackendType to satisfy [pflag.Value] interface +func (f *ScopeBackendType) Type() string { + return "ScopeBackendType" +} + +type SecretMetadata struct { + // A unique name to identify the secret. + Key types.String `tfsdk:"key"` + // The last updated timestamp (in milliseconds) for the secret. + LastUpdatedTimestamp types.Int64 `tfsdk:"last_updated_timestamp"` +} + +type SecretScope struct { + // The type of secret scope backend. + BackendType ScopeBackendType `tfsdk:"backend_type"` + // The metadata for the secret scope if the type is `AZURE_KEYVAULT` + KeyvaultMetadata *AzureKeyVaultSecretScopeMetadata `tfsdk:"keyvault_metadata"` + // A unique name to identify the secret scope. + Name types.String `tfsdk:"name"` +} + +type SparseCheckout struct { + // List of patterns to include for sparse checkout. + Patterns []types.String `tfsdk:"patterns"` +} + +type SparseCheckoutUpdate struct { + // List of patterns to include for sparse checkout. 
+ Patterns []types.String `tfsdk:"patterns"` +} + +type UpdateCredentials struct { + // The ID for the corresponding credential to access. + CredentialId types.Int64 `tfsdk:"-" url:"-"` + // Git provider. This field is case-insensitive. The available Git providers + // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, + // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and + // awsCodeCommit. + GitProvider types.String `tfsdk:"git_provider"` + // The username or email provided with your Git provider account, depending + // on which provider you are using. For GitHub, GitHub Enterprise Server, or + // Azure DevOps Services, either email or username may be used. For GitLab, + // GitLab Enterprise Edition, email must be used. For AWS CodeCommit, + // BitBucket or BitBucket Server, username must be used. For all other + // providers please see your provider's Personal Access Token authentication + // documentation to see what is supported. + GitUsername types.String `tfsdk:"git_username"` + // The personal access token used to authenticate to the corresponding Git + // provider. For certain providers, support may exist for other types of + // scoped access tokens. [Learn more]. The personal access token used to + // authenticate to the corresponding Git + // + // [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html + PersonalAccessToken types.String `tfsdk:"personal_access_token"` +} + +type UpdateRepo struct { + // Branch that the local version of the repo is checked out to. + Branch types.String `tfsdk:"branch"` + // The ID for the corresponding repo to access. + RepoId types.Int64 `tfsdk:"-" url:"-"` + // If specified, update the sparse checkout settings. The update will fail + // if sparse checkout is not enabled for the repo. + SparseCheckout *SparseCheckoutUpdate `tfsdk:"sparse_checkout"` + // Tag that the local version of the repo is checked out to. Updating the + // repo to a tag puts the repo in a detached HEAD state. Before committing + // new changes, you must update the repo to a branch instead of the detached + // HEAD. + Tag types.String `tfsdk:"tag"` +} + +type UpdateResponse struct { +} + +type WorkspaceObjectAccessControlRequest struct { + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Permission level + PermissionLevel WorkspaceObjectPermissionLevel `tfsdk:"permission_level"` + // application ID of a service principal + ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type WorkspaceObjectAccessControlResponse struct { + // All permissions. + AllPermissions []WorkspaceObjectPermission `tfsdk:"all_permissions"` + // Display name of the user or service principal. + DisplayName types.String `tfsdk:"display_name"` + // name of the group + GroupName types.String `tfsdk:"group_name"` + // Name of the service principal. 
+ ServicePrincipalName types.String `tfsdk:"service_principal_name"` + // name of the user + UserName types.String `tfsdk:"user_name"` +} + +type WorkspaceObjectPermission struct { + Inherited types.Bool `tfsdk:"inherited"` + + InheritedFromObject []types.String `tfsdk:"inherited_from_object"` + // Permission level + PermissionLevel WorkspaceObjectPermissionLevel `tfsdk:"permission_level"` +} + +// Permission level +type WorkspaceObjectPermissionLevel string + +const WorkspaceObjectPermissionLevelCanEdit WorkspaceObjectPermissionLevel = `CAN_EDIT` + +const WorkspaceObjectPermissionLevelCanManage WorkspaceObjectPermissionLevel = `CAN_MANAGE` + +const WorkspaceObjectPermissionLevelCanRead WorkspaceObjectPermissionLevel = `CAN_READ` + +const WorkspaceObjectPermissionLevelCanRun WorkspaceObjectPermissionLevel = `CAN_RUN` + +// String representation for [fmt.Print] +func (f *WorkspaceObjectPermissionLevel) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *WorkspaceObjectPermissionLevel) Set(v string) error { + switch v { + case `CAN_EDIT`, `CAN_MANAGE`, `CAN_READ`, `CAN_RUN`: + *f = WorkspaceObjectPermissionLevel(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CAN_EDIT", "CAN_MANAGE", "CAN_READ", "CAN_RUN"`, v) + } +} + +// Type always returns WorkspaceObjectPermissionLevel to satisfy [pflag.Value] interface +func (f *WorkspaceObjectPermissionLevel) Type() string { + return "WorkspaceObjectPermissionLevel" +} + +type WorkspaceObjectPermissions struct { + AccessControlList []WorkspaceObjectAccessControlResponse `tfsdk:"access_control_list"` + + ObjectId types.String `tfsdk:"object_id"` + + ObjectType types.String `tfsdk:"object_type"` +} + +type WorkspaceObjectPermissionsDescription struct { + Description types.String `tfsdk:"description"` + // Permission level + PermissionLevel WorkspaceObjectPermissionLevel `tfsdk:"permission_level"` +} + +type WorkspaceObjectPermissionsRequest struct { + AccessControlList []WorkspaceObjectAccessControlRequest `tfsdk:"access_control_list"` + // The workspace object for which to get or manage permissions. + WorkspaceObjectId types.String `tfsdk:"-" url:"-"` + // The workspace object type for which to get or manage permissions. + WorkspaceObjectType types.String `tfsdk:"-" url:"-"` +} From 60f059793d97736852c59b4b72e4ae22c30c255e Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Tue, 9 Jul 2024 22:54:34 +0200 Subject: [PATCH 2/3] update (#3738) --- common/reflect_resource_plugin_framework.go | 291 ++++++++++++++++++ .../reflect_resource_plugin_framework_test.go | 256 +++++++++++++++ go.mod | 2 +- 3 files changed, 548 insertions(+), 1 deletion(-) create mode 100644 common/reflect_resource_plugin_framework.go create mode 100644 common/reflect_resource_plugin_framework_test.go diff --git a/common/reflect_resource_plugin_framework.go b/common/reflect_resource_plugin_framework.go new file mode 100644 index 0000000000..42a375fe15 --- /dev/null +++ b/common/reflect_resource_plugin_framework.go @@ -0,0 +1,291 @@ +package common + +import ( + "context" + "fmt" + "reflect" + + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// Converts a tfsdk struct into a go-sdk struct. 
+func TfSdkToGoSdkStruct(tfsdk interface{}, gosdk interface{}, ctx context.Context) error { + srcVal := reflect.ValueOf(tfsdk) + destVal := reflect.ValueOf(gosdk) + + if srcVal.Kind() == reflect.Ptr { + srcVal = srcVal.Elem() + } + + if destVal.Kind() != reflect.Ptr { + panic("please provide a pointer for the gosdk struct") + } + destVal = destVal.Elem() + + if srcVal.Kind() != reflect.Struct || destVal.Kind() != reflect.Struct { + panic("input should be structs") + } + + forceSendFieldsField := destVal.FieldByName("ForceSendFields") + + srcType := srcVal.Type() + for i := 0; i < srcVal.NumField(); i++ { + srcField := srcVal.Field(i) + srcFieldName := srcType.Field(i).Name + + destField := destVal.FieldByName(srcFieldName) + + srcFieldTag := srcType.Field(i).Tag.Get("tfsdk") + if srcFieldTag == "-" { + continue + } + + tfSdkToGoSdkSingleField(srcField, destField, srcFieldName, &forceSendFieldsField, ctx) + } + + return nil +} + +func tfSdkToGoSdkSingleField(srcField reflect.Value, destField reflect.Value, srcFieldName string, forceSendFieldsField *reflect.Value, ctx context.Context) error { + + if !destField.IsValid() { + panic(fmt.Errorf("destination field is not valid: %s", destField.Type().Name())) + } + + if !destField.CanSet() { + panic(fmt.Errorf("destination field can not be set: %s", destField.Type().Name())) + } + srcFieldValue := srcField.Interface() + + if srcFieldValue == nil { + return nil + } else if srcField.Kind() == reflect.Ptr { + // Allocate new memory for the destination field + destField.Set(reflect.New(destField.Type().Elem())) + + // Recursively populate the nested struct. + if err := TfSdkToGoSdkStruct(srcFieldValue, destField.Interface(), ctx); err != nil { + return err + } + } else if srcField.Kind() == reflect.Struct { + switch v := srcFieldValue.(type) { + case types.Bool: + destField.SetBool(v.ValueBool()) + if !v.IsNull() { + addToForceSendFields(srcFieldName, forceSendFieldsField) + } + case types.Int64: + destField.SetInt(v.ValueInt64()) + if !v.IsNull() { + addToForceSendFields(srcFieldName, forceSendFieldsField) + } + case types.Float64: + destField.SetFloat(v.ValueFloat64()) + if !v.IsNull() { + addToForceSendFields(srcFieldName, forceSendFieldsField) + } + case types.String: + destField.SetString(v.ValueString()) + if !v.IsNull() { + addToForceSendFields(srcFieldName, forceSendFieldsField) + } + case types.List: + diag := v.ElementsAs(ctx, destField.Addr().Interface(), false) + if len(diag) != 0 { + panic("Error") + } + case types.Map: + v.ElementsAs(ctx, destField.Addr().Interface(), false) + default: + // If it is a real stuct instead of a tfsdk type, recursively resolve it. 
+ if err := TfSdkToGoSdkStruct(srcFieldValue, destField.Addr().Interface(), ctx); err != nil { + return err + } + } + } else if srcField.Kind() == reflect.Slice { + destSlice := reflect.MakeSlice(destField.Type(), srcField.Len(), srcField.Cap()) + for j := 0; j < srcField.Len(); j++ { + nestedSrcField := srcField.Index(j) + nestedSrcField.Kind() + + srcElem := srcField.Index(j) + + destElem := destSlice.Index(j) + if err := tfSdkToGoSdkSingleField(srcElem, destElem, "", nil, ctx); err != nil { + return err + } + } + destField.Set(destSlice) + } else if srcField.Kind() == reflect.Map { + destMap := reflect.MakeMap(destField.Type()) + for _, key := range srcField.MapKeys() { + srcMapValue := srcField.MapIndex(key) + destMapValue := reflect.New(destField.Type().Elem()).Elem() + destMapKey := reflect.ValueOf(key.Interface()) + if err := tfSdkToGoSdkSingleField(srcMapValue, destMapValue, "", nil, ctx); err != nil { + return err + } + destMap.SetMapIndex(destMapKey, destMapValue) + } + destField.Set(destMap) + } else { + panic(fmt.Errorf("unknown type for field: %s", srcField.Type().Name())) + } + return nil +} + +// Converts a go-sdk struct into a tfsdk struct. +func GoSdkToTfSdkStruct(gosdk interface{}, tfsdk interface{}, ctx context.Context) error { + + srcVal := reflect.ValueOf(gosdk) + destVal := reflect.ValueOf(tfsdk) + + if srcVal.Kind() == reflect.Ptr { + srcVal = srcVal.Elem() + } + + if destVal.Kind() != reflect.Ptr { + panic("please provide a pointer for the tfsdk struct") + } + destVal = destVal.Elem() + + if srcVal.Kind() != reflect.Struct || destVal.Kind() != reflect.Struct { + panic(fmt.Errorf("input should be structs %s, %s", srcVal.Type().Name(), destVal.Type().Name())) + } + + forceSendField := srcVal.FieldByName("ForceSendFields") + if !forceSendField.IsValid() { + panic(fmt.Errorf("go sdk struct does not have valid ForceSendField %s", srcVal.Type().Name())) + } + switch forceSendField.Interface().(type) { + case []string: + default: + panic(fmt.Errorf("ForceSendField is not of type []string")) + } + forceSendFieldVal := forceSendField.Interface().([]string) + srcType := srcVal.Type() + for i := 0; i < srcVal.NumField(); i++ { + srcField := srcVal.Field(i) + srcFieldName := srcVal.Type().Field(i).Name + + destField := destVal.FieldByName(srcFieldName) + srcFieldTag := srcType.Field(i).Tag.Get("json") + if srcFieldTag == "-" { + continue + } + goSdkToTfSdkSingleField(srcField, destField, srcFieldName, forceSendFieldVal, false, ctx) + } + return nil +} + +func goSdkToTfSdkSingleField(srcField reflect.Value, destField reflect.Value, srcFieldName string, forceSendField []string, alwaysAdd bool, ctx context.Context) error { + + if !destField.IsValid() { + panic(fmt.Errorf("destination field is not valid: %s", destField.Type().Name())) + } + + if !destField.CanSet() { + panic(fmt.Errorf("destination field can not be set: %s", destField.Type().Name())) + } + + srcFieldValue := srcField.Interface() + + if srcFieldValue == nil { + return nil + } + switch srcField.Kind() { + case reflect.Ptr: + destField.Set(reflect.New(destField.Type().Elem())) + + // Recursively populate the nested struct. 
+		if err := GoSdkToTfSdkStruct(srcFieldValue, destField.Interface(), ctx); err != nil {
+			return err
+		}
+	case reflect.Bool:
+		boolVal := srcFieldValue.(bool)
+		// set the value if alwaysAdd is true, the value is non-zero, or the field is in the forceSendFields list; otherwise set null
+		if alwaysAdd || !(!boolVal && !checkTheStringInForceSendFields(srcFieldName, forceSendField)) {
+			destField.Set(reflect.ValueOf(types.BoolValue(boolVal)))
+		} else {
+			destField.Set(reflect.ValueOf(types.BoolNull()))
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		// convert any kind of integer to int64
+		intVal := srcField.Convert(reflect.TypeOf(int64(0))).Interface().(int64)
+		// set the value if alwaysAdd is true, the value is non-zero, or the field is in the forceSendFields list; otherwise set null
+		if alwaysAdd || !(intVal == 0 && !checkTheStringInForceSendFields(srcFieldName, forceSendField)) {
+			destField.Set(reflect.ValueOf(types.Int64Value(int64(intVal))))
+		} else {
+			destField.Set(reflect.ValueOf(types.Int64Null()))
+		}
+	case reflect.Float32, reflect.Float64:
+		// convert any kind of float to float64
+		float64Val := srcField.Convert(reflect.TypeOf(float64(0))).Interface().(float64)
+		// set the value if alwaysAdd is true, the value is non-zero, or the field is in the forceSendFields list; otherwise set null
+		if alwaysAdd || !(float64Val == 0 && !checkTheStringInForceSendFields(srcFieldName, forceSendField)) {
+			destField.Set(reflect.ValueOf(types.Float64Value(float64Val)))
+		} else {
+			destField.Set(reflect.ValueOf(types.Float64Null()))
+		}
+	case reflect.String:
+		strVal := srcFieldValue.(string)
+		// set the value if alwaysAdd is true, the value is non-empty, or the field is in the forceSendFields list; otherwise set null
+		if alwaysAdd || !(strVal == "" && !checkTheStringInForceSendFields(srcFieldName, forceSendField)) {
+			destField.Set(reflect.ValueOf(types.StringValue(strVal)))
+		} else {
+			destField.Set(reflect.ValueOf(types.StringNull()))
+		}
+	case reflect.Struct:
+		// resolve the nested struct by recursively calling the function
+		if err := GoSdkToTfSdkStruct(srcFieldValue, destField.Addr().Interface(), ctx); err != nil {
+			return err
+		}
+	case reflect.Slice:
+		destSlice := reflect.MakeSlice(destField.Type(), srcField.Len(), srcField.Cap())
+		for j := 0; j < srcField.Len(); j++ {
+			nestedSrcField := srcField.Index(j)
+			nestedSrcField.Kind()
+
+			srcElem := srcField.Index(j)
+
+			destElem := destSlice.Index(j)
+			if err := goSdkToTfSdkSingleField(srcElem, destElem, "", nil, true, ctx); err != nil {
+				return err
+			}
+		}
+		destField.Set(destSlice)
+	case reflect.Map:
+		destMap := reflect.MakeMap(destField.Type())
+		for _, key := range srcField.MapKeys() {
+			srcMapValue := srcField.MapIndex(key)
+			destMapValue := reflect.New(destField.Type().Elem()).Elem()
+			destMapKey := reflect.ValueOf(key.Interface())
+			if err := goSdkToTfSdkSingleField(srcMapValue, destMapValue, "", nil, true, ctx); err != nil {
+				return err
+			}
+			destMap.SetMapIndex(destMapKey, destMapValue)
+		}
+		destField.Set(destMap)
+	default:
+		panic(fmt.Errorf("unknown type for field: %s", srcField.Type().Name()))
+	}
+	return nil
+}
+
+func addToForceSendFields(fieldName string, forceSendFieldsField *reflect.Value) {
+	if forceSendFieldsField == nil {
+		return
+	}
+	forceSendFields := forceSendFieldsField.Interface().([]string)
+	forceSendFields = append(forceSendFields, fieldName)
+	forceSendFieldsField.Set(reflect.ValueOf(forceSendFields))
+}
+
+func checkTheStringInForceSendFields(fieldName string, forceSendFields
[]string) bool { + for _, field := range forceSendFields { + if field == fieldName { + return true + } + } + return false +} diff --git a/common/reflect_resource_plugin_framework_test.go b/common/reflect_resource_plugin_framework_test.go new file mode 100644 index 0000000000..99fb143d2b --- /dev/null +++ b/common/reflect_resource_plugin_framework_test.go @@ -0,0 +1,256 @@ +package common + +import ( + "reflect" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +type DummyTfSdk struct { + Enabled types.Bool `tfsdk:"enabled"` + Workers types.Int64 `tfsdk:"workers"` + Floats types.Float64 `tfsdk:"floats"` + Description types.String `tfsdk:"description"` + Tasks types.String `tfsdk:"task"` + Nested *DummyNestedTfSdk `tfsdk:"nested"` + NoPointerNested DummyNestedTfSdk `tfsdk:"no_pointer_nested"` + NestedList []DummyNestedTfSdk `tfsdk:"nested_list"` + NestedPointerList []*DummyNestedTfSdk `tfsdk:"nested_pointer_list"` + Map map[string]types.String `tfsdk:"map"` + NestedMap map[string]DummyNestedTfSdk `tfsdk:"nested_map"` + Repeated []types.Int64 `tfsdk:"repeated"` + Attributes map[string]types.String `tfsdk:"attributes"` + Irrelevant types.String `tfsdk:"-"` +} + +type DummyNestedTfSdk struct { + Name types.String `tfsdk:"name"` + Enabled types.Bool `tfsdk:"enabled"` +} + +type DummyGoSdk struct { + Enabled bool `json:"enabled"` + Workers int64 `json:"workers"` + Floats float64 `json:"floats"` + Description string `json:"description"` + Tasks string `json:"tasks"` + Nested *DummyNestedGoSdk `json:"nested"` + NoPointerNested DummyNestedGoSdk `json:"no_pointer_nested"` + NestedList []DummyNestedGoSdk `json:"nested_list"` + NestedPointerList []*DummyNestedGoSdk `json:"nested_pointer_list"` + Map map[string]string `json:"map"` + NestedMap map[string]DummyNestedGoSdk `json:"nested_map"` + Repeated []int64 `json:"repeated"` + Attributes map[string]string `json:"attributes"` + ForceSendFields []string `json:"-"` +} + +type DummyNestedGoSdk struct { + Name string `json:"name"` + Enabled bool `json:"enabled"` + ForceSendFields []string `json:"-"` +} + +type emptyCtx struct{} + +func (emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (emptyCtx) Done() <-chan struct{} { + return nil +} + +func (emptyCtx) Err() error { + return nil +} + +func (emptyCtx) Value(key any) any { + return nil +} + +var ctx = emptyCtx{} + +// Constructing a dummy tfsdk struct. 
+var tfSdkStruct = DummyTfSdk{ + Enabled: types.BoolValue(false), + Workers: types.Int64Value(12), + Description: types.StringValue("abc"), + Tasks: types.StringNull(), + Nested: &DummyNestedTfSdk{ + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + NoPointerNested: DummyNestedTfSdk{ + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + NestedList: []DummyNestedTfSdk{ + { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + }, + Map: map[string]types.String{ + "key1": types.StringValue("value1"), + "key2": types.StringValue("value2"), + }, + NestedMap: map[string]DummyNestedTfSdk{ + "key1": { + Name: types.StringValue("abc"), + Enabled: types.BoolValue(false), + }, + "key2": { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + }, + NestedPointerList: []*DummyNestedTfSdk{ + { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + { + Name: types.StringValue("def"), + Enabled: types.BoolValue(true), + }, + }, + Attributes: map[string]types.String{"key": types.StringValue("value")}, + Repeated: []types.Int64{types.Int64Value(12), types.Int64Value(34)}, +} + +func TestGetAndSetPluginFramework(t *testing.T) { + // Creating schema. + scm := schema.Schema{ + Attributes: map[string]schema.Attribute{ + "enabled": schema.BoolAttribute{ + Required: true, + }, + "workers": schema.Int64Attribute{ + Optional: true, + }, + "description": schema.StringAttribute{ + Optional: true, + }, + "task": schema.StringAttribute{ + Optional: true, + }, + "repeated": schema.ListAttribute{ + ElementType: types.Int64Type, + Optional: true, + }, + "floats": schema.Float64Attribute{ + Optional: true, + }, + "nested": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Optional: true, + }, + "enabled": schema.BoolAttribute{ + Optional: true, + }, + }, + }, + "no_pointer_nested": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Optional: true, + }, + "enabled": schema.BoolAttribute{ + Optional: true, + }, + }, + }, + "nested_list": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Optional: true, + }, + "enabled": schema.BoolAttribute{ + Optional: true, + }, + }, + }, + Optional: true, + }, + "map": schema.MapAttribute{ + ElementType: types.StringType, + Optional: true, + }, + "nested_pointer_list": schema.ListNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Optional: true, + }, + "enabled": schema.BoolAttribute{ + Optional: true, + }, + }, + }, + Optional: true, + }, + "attributes": schema.MapAttribute{ + ElementType: types.StringType, + Optional: true, + }, + "nested_map": schema.MapNestedAttribute{ + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Optional: true, + }, + "enabled": schema.BoolAttribute{ + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + } + state := tfsdk.State{ + Schema: scm, + } + + // Assert that we can set state from the tfsdk struct. + diags := state.Set(ctx, tfSdkStruct) + assert.Len(t, diags, 0) + + // Assert that we can get a struct from the state. 
+ getterStruct := DummyTfSdk{} + diags = state.Get(ctx, &getterStruct) + assert.Len(t, diags, 0) + + // Assert the struct populated from .Get is exactly the same as the original tfsdk struct. + assert.True(t, reflect.DeepEqual(getterStruct, tfSdkStruct)) + +} + +func TestStructConversion(t *testing.T) { + // Convert from tfsdk to gosdk struct using the converter function + convertedGoSdkStruct := DummyGoSdk{} + e := TfSdkToGoSdkStruct(tfSdkStruct, &convertedGoSdkStruct, ctx) + assert.NoError(t, e) + + // Convert the gosdk struct back to tfsdk struct + convertedTfSdkStruct := DummyTfSdk{} + e = GoSdkToTfSdkStruct(convertedGoSdkStruct, &convertedTfSdkStruct, ctx) + assert.NoError(t, e) + + // Assert that the struct is exactly the same after tfsdk --> gosdk --> tfsdk + assert.True(t, reflect.DeepEqual(tfSdkStruct, convertedTfSdkStruct)) +} diff --git a/go.mod b/go.mod index 436c0fad28..4a7870876e 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl/v2 v2.20.1 + github.com/hashicorp/terraform-plugin-framework v1.9.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 github.com/stretchr/testify v1.9.0 @@ -48,7 +49,6 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect - github.com/hashicorp/terraform-plugin-framework v1.9.0 // indirect github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect From be7d694d8f3c13229c936642ab8051e63108b44d Mon Sep 17 00:00:00 2001 From: Edward Feng <67326663+edwardfeng-db@users.noreply.github.com> Date: Thu, 11 Jul 2024 15:09:15 +0200 Subject: [PATCH 3/3] update (#3759) --- common/reflect_resource_plugin_framework.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/common/reflect_resource_plugin_framework.go b/common/reflect_resource_plugin_framework.go index 42a375fe15..8d7b45325d 100644 --- a/common/reflect_resource_plugin_framework.go +++ b/common/reflect_resource_plugin_framework.go @@ -90,12 +90,9 @@ func tfSdkToGoSdkSingleField(srcField reflect.Value, destField reflect.Value, sr addToForceSendFields(srcFieldName, forceSendFieldsField) } case types.List: - diag := v.ElementsAs(ctx, destField.Addr().Interface(), false) - if len(diag) != 0 { - panic("Error") - } + panic("types.List should never be used, use go native slices instead") case types.Map: - v.ElementsAs(ctx, destField.Addr().Interface(), false) + panic("types.Map should never be used, use go native maps instead") default: // If it is a real stuct instead of a tfsdk type, recursively resolve it. if err := TfSdkToGoSdkStruct(srcFieldValue, destField.Addr().Interface(), ctx); err != nil {
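
For reference, a minimal sketch of how the two converters introduced above are intended to be used together. The struct pair, field names, and import paths below are illustrative assumptions rather than part of this change; only common.TfSdkToGoSdkStruct and common.GoSdkToTfSdkStruct come from the patch itself, and the repository module path is assumed to be github.com/databricks/terraform-provider-databricks.

package main

import (
	"context"
	"fmt"

	"github.com/databricks/terraform-provider-databricks/common" // assumed module path
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// Hypothetical mirrored structs following the generated *_tf layout: tfsdk
// tags and plugin-framework types on one side, json tags, go-native types and
// ForceSendFields on the other.
type repoTfSdk struct {
	Url      types.String `tfsdk:"url"`
	Provider types.String `tfsdk:"provider"`
	Path     types.String `tfsdk:"path"`
}

type repoGoSdk struct {
	Url             string   `json:"url"`
	Provider        string   `json:"provider"`
	Path            string   `json:"path,omitempty"`
	ForceSendFields []string `json:"-"`
}

func main() {
	ctx := context.Background()

	// In a resource this value would normally come from req.Plan.Get(ctx, &plan).
	plan := repoTfSdk{
		Url:      types.StringValue("https://github.com/databricks/cli"),
		Provider: types.StringValue("gitHub"),
		Path:     types.StringNull(), // not set in the plan
	}

	// tfsdk -> go-sdk: every non-null primitive is copied and recorded in
	// ForceSendFields, so explicitly set zero values are still serialized.
	var createReq repoGoSdk
	if err := common.TfSdkToGoSdkStruct(plan, &createReq, ctx); err != nil {
		panic(err)
	}
	fmt.Println(createReq.Url, createReq.ForceSendFields) // ... [Url Provider]

	// go-sdk -> tfsdk: zero values not listed in ForceSendFields become null.
	var state repoTfSdk
	if err := common.GoSdkToTfSdkStruct(createReq, &state, ctx); err != nil {
		panic(err)
	}
	fmt.Println(state.Path.IsNull()) // true
}

As the final commit above makes explicit, the converters reject types.List and types.Map, so list and map fields in the generated *_tf structs remain Go-native slices and maps, as in this sketch.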