diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 4ceeab3d38..ffd6f58dd9 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -d05898328669a3f8ab0c2ecee37db2673d3ea3f7 \ No newline at end of file +6f6b1371e640f2dfeba72d365ac566368656f6b6 \ No newline at end of file diff --git a/go.mod b/go.mod index 8f5de34e8d..cb0d35a5ba 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.46.0 + github.com/databricks/databricks-sdk-go v0.47.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 9cace277b5..8ff73d7ad5 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.46.0 h1:D0TxmtSVAOsdnfzH4OGtAmcq+8TyA7Z6fA6JEYhupeY= -github.com/databricks/databricks-sdk-go v0.46.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.47.0 h1:eE7dN9axviL8+s10jnQAayOYDaR+Mfu7E9COGjO4lrQ= +github.com/databricks/databricks-sdk-go v0.47.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 2ae21cc7d9..74406307a5 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -17,6 +17,10 @@ import ( type App struct { // The active deployment of the app. ActiveDeployment *AppDeployment `tfsdk:"active_deployment" tf:"optional"` + + AppStatus *ApplicationStatus `tfsdk:"app_status" tf:"optional"` + + ComputeStatus *ComputeStatus `tfsdk:"compute_status" tf:"optional"` // The creation time of the app. Formatted timestamp in ISO 6801. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The email of the user that created the app. @@ -32,8 +36,6 @@ type App struct { ServicePrincipalId types.Int64 `tfsdk:"service_principal_id" tf:"optional"` ServicePrincipalName types.String `tfsdk:"service_principal_name" tf:"optional"` - - Status *AppStatus `tfsdk:"status" tf:"optional"` // The update time of the app. Formatted timestamp in ISO 6801. UpdateTime types.String `tfsdk:"update_time" tf:"optional"` // The email of the user that last updated the app. @@ -84,7 +86,7 @@ type AppDeployment struct { // the app in the workspace during deployment creation, whereas the latter // provides a system generated stable snapshotted source code path used by // the deployment. - SourceCodePath types.String `tfsdk:"source_code_path" tf:""` + SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` // Status and status message of the deployment Status *AppDeploymentStatus `tfsdk:"status" tf:"optional"` // The update time of the deployment. Formatted timestamp in ISO 6801. 
@@ -132,16 +134,25 @@ type AppPermissionsRequest struct { AppName types.String `tfsdk:"-"` } -type AppStatus struct { - // Message corresponding with the app state. +type ApplicationStatus struct { + // Application status message + Message types.String `tfsdk:"message" tf:"optional"` + // State of the application. + State types.String `tfsdk:"state" tf:"optional"` +} + +type ComputeStatus struct { + // Compute status message Message types.String `tfsdk:"message" tf:"optional"` - // State of the app. + // State of the app compute. State types.String `tfsdk:"state" tf:"optional"` } type CreateAppDeploymentRequest struct { // The name of the app. AppName types.String `tfsdk:"-"` + // The unique id of the deployment. + DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"` // The mode of which the deployment will manage the source code. Mode types.String `tfsdk:"mode" tf:"optional"` // The workspace file system path of the source code used to create the app @@ -151,7 +162,7 @@ type CreateAppDeploymentRequest struct { // the app in the workspace during deployment creation, whereas the latter // provides a system generated stable snapshotted source code path used by // the deployment. - SourceCodePath types.String `tfsdk:"source_code_path" tf:""` + SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` } type CreateAppRequest struct { @@ -168,9 +179,6 @@ type DeleteAppRequest struct { Name types.String `tfsdk:"-"` } -type DeleteResponse struct { -} - // Get an app deployment type GetAppDeploymentRequest struct { // The name of the app. @@ -245,9 +253,6 @@ type StopAppRequest struct { Name types.String `tfsdk:"-"` } -type StopAppResponse struct { -} - type UpdateAppRequest struct { // The description of the app. Description types.String `tfsdk:"description" tf:"optional"` diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index e84b479703..78848824f1 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -88,6 +88,21 @@ type ArtifactMatcher struct { type AssignResponse struct { } +// AWS temporary credentials for API authentication. Read more at +// https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. +type AwsCredentials struct { + // The access key ID that identifies the temporary credentials. + AccessKeyId types.String `tfsdk:"access_key_id" tf:"optional"` + // The Amazon Resource Name (ARN) of the S3 access point for temporary + // credentials related the external location. + AccessPoint types.String `tfsdk:"access_point" tf:"optional"` + // The secret access key that can be used to sign AWS API requests. + SecretAccessKey types.String `tfsdk:"secret_access_key" tf:"optional"` + // The token that users must pass to AWS API to use the temporary + // credentials. + SessionToken types.String `tfsdk:"session_token" tf:"optional"` +} + type AwsIamRoleRequest struct { // The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access. RoleArn types.String `tfsdk:"role_arn" tf:""` @@ -145,6 +160,13 @@ type AzureServicePrincipal struct { DirectoryId types.String `tfsdk:"directory_id" tf:""` } +// Azure temporary credentials for API authentication. 
Read more at +// https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas +type AzureUserDelegationSas struct { + // The signed URI (SAS Token) used to access blob services for a given path + SasToken types.String `tfsdk:"sas_token" tf:"optional"` +} + // Cancel refresh type CancelRefreshRequest struct { // ID of the refresh. @@ -404,7 +426,7 @@ type CreateFunction struct { // JSON-serialized key-value pair map, encoded (escaped) as a string. Properties types.String `tfsdk:"properties" tf:"optional"` // Table function return parameters. - ReturnParams FunctionParameterInfos `tfsdk:"return_params" tf:""` + ReturnParams *FunctionParameterInfos `tfsdk:"return_params" tf:"optional"` // Function language. When **EXTERNAL** is used, the language of the routine // function should be specified in the __external_language__ field, and the // __return_params__ of the function cannot be used (as **TABLE** return @@ -414,7 +436,7 @@ type CreateFunction struct { // Function body. RoutineDefinition types.String `tfsdk:"routine_definition" tf:""` // Function dependencies. - RoutineDependencies DependencyList `tfsdk:"routine_dependencies" tf:""` + RoutineDependencies *DependencyList `tfsdk:"routine_dependencies" tf:"optional"` // Name of parent schema relative to its parent catalog. SchemaName types.String `tfsdk:"schema_name" tf:""` // Function security type. @@ -1018,6 +1040,42 @@ type FunctionParameterInfos struct { Parameters []FunctionParameterInfo `tfsdk:"parameters" tf:"optional"` } +// GCP temporary credentials for API authentication. Read more at +// https://developers.google.com/identity/protocols/oauth2/service-account +type GcpOauthToken struct { + OauthToken types.String `tfsdk:"oauth_token" tf:"optional"` +} + +type GenerateTemporaryTableCredentialRequest struct { + // The operation performed against the table data, either READ or + // READ_WRITE. If READ_WRITE is specified, the credentials returned will + // have write permissions, otherwise, it will be read only. + Operation types.String `tfsdk:"operation" tf:"optional"` + // UUID of the table to read or write. + TableId types.String `tfsdk:"table_id" tf:"optional"` +} + +type GenerateTemporaryTableCredentialResponse struct { + // AWS temporary credentials for API authentication. Read more at + // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. + AwsTempCredentials *AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional"` + // Azure temporary credentials for API authentication. Read more at + // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas + AzureUserDelegationSas *AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional"` + // Server time when the credential will expire, in unix epoch milliseconds + // since January 1, 1970 at 00:00:00 UTC. The API client is advised to cache + // the credential given this expiration time. + ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"` + // GCP temporary credentials for API authentication. Read more at + // https://developers.google.com/identity/protocols/oauth2/service-account + GcpOauthToken *GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional"` + // R2 temporary credentials for API authentication. Read more at + // https://developers.cloudflare.com/r2/api/s3/tokens/. + R2TempCredentials *R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional"` + // The URL of the storage path accessible by the temporary credential. 
+ Url types.String `tfsdk:"url" tf:"optional"` +} + // Gets the metastore assignment for a workspace type GetAccountMetastoreAssignmentRequest struct { // Workspace ID. @@ -1150,6 +1208,9 @@ type GetMetastoreSummaryResponse struct { DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds" tf:"optional"` // The scope of Delta Sharing enabled for the metastore. DeltaSharingScope types.String `tfsdk:"delta_sharing_scope" tf:"optional"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. + ExternalAccessEnabled types.Bool `tfsdk:"external_access_enabled" tf:"optional"` // Globally unique metastore ID across clouds and regions, of the form // `cloud:region:metastore_id`. GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` @@ -1262,6 +1323,8 @@ type GetTableRequest struct { IncludeBrowse types.Bool `tfsdk:"-"` // Whether delta metadata should be included in the response. IncludeDeltaMetadata types.Bool `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` } // Get catalog workspace bindings @@ -1546,6 +1609,8 @@ type ListStorageCredentialsResponse struct { type ListSummariesRequest struct { // Name of parent catalog for tables of interest. CatalogName types.String `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` // Maximum number of summaries for tables to return. If not set, the page // length is set to a server configured value (10000, as of 1/5/2024). - // when set to a value greater than 0, the page length is the minimum of @@ -1606,6 +1671,8 @@ type ListTablesRequest struct { IncludeBrowse types.Bool `tfsdk:"-"` // Whether delta metadata should be included in the response. IncludeDeltaMetadata types.Bool `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` // Maximum number of tables to return. If not set, all the tables are // returned (not recommended). - when set to a value greater than 0, the // page length is the minimum of this value and a server configured value; - @@ -1693,6 +1760,9 @@ type MetastoreInfo struct { DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds" tf:"optional"` // The scope of Delta Sharing enabled for the metastore. DeltaSharingScope types.String `tfsdk:"delta_sharing_scope" tf:"optional"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. + ExternalAccessEnabled types.Bool `tfsdk:"external_access_enabled" tf:"optional"` // Globally unique metastore ID across clouds and regions, of the form // `cloud:region:metastore_id`. GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` @@ -2098,6 +2168,17 @@ type QuotaInfo struct { QuotaName types.String `tfsdk:"quota_name" tf:"optional"` } +// R2 temporary credentials for API authentication. Read more at +// https://developers.cloudflare.com/r2/api/s3/tokens/. +type R2Credentials struct { + // The access key ID that identifies the temporary credentials. + AccessKeyId types.String `tfsdk:"access_key_id" tf:"optional"` + // The secret access key associated with the access key. + SecretAccessKey types.String `tfsdk:"secret_access_key" tf:"optional"` + // The generated JWT that users must pass to use the temporary credentials. 
+ SessionToken types.String `tfsdk:"session_token" tf:"optional"` +} + // Get a Volume type ReadVolumeRequest struct { // Whether to include volumes in the response for which the principal can diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index e983a492c4..223ba1cb66 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -326,8 +326,14 @@ type ClusterAttributes struct { NodeTypeId types.String `tfsdk:"node_type_id" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -526,8 +532,14 @@ type ClusterDetails struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -844,8 +856,14 @@ type ClusterSpec struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. 
RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -1040,8 +1058,14 @@ type CreateCluster struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -1423,8 +1447,14 @@ type EditCluster struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -2963,8 +2993,14 @@ type UpdateClusterResource struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index d5a1b57f58..457ea2bb4a 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -263,7 +263,11 @@ type CreateJob struct { // begin or complete as well as when this job is deleted. 
EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be - // referenced by tasks of this job. + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. Environments []JobEnvironment `tfsdk:"environment" tf:"optional"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is @@ -309,12 +313,12 @@ type CreateJob struct { Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. Queue *QueueSettings `tfsdk:"queue" tf:"optional"` - // Write-only setting, available only in Create/Update/Reset and Submit - // calls. Specifies the user or service principal that the job runs as. If - // not specified, the job runs as the user who created the job. + // Write-only setting. Specifies the user, service principal or group that + // the job/pipeline runs as. If not specified, the job/pipeline runs as the + // user who created the job/pipeline. // - // Only `user_name` or `service_principal_name` can be specified. If both - // are specified, an error is thrown. + // Exactly one of `user_name`, `service_principal_name`, `group_name` should + // be specified. If not, an error is thrown. RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI @@ -827,12 +831,12 @@ type JobPermissionsRequest struct { JobId types.String `tfsdk:"-"` } -// Write-only setting, available only in Create/Update/Reset and Submit calls. -// Specifies the user or service principal that the job runs as. If not -// specified, the job runs as the user who created the job. +// Write-only setting. Specifies the user, service principal or group that the +// job/pipeline runs as. If not specified, the job/pipeline runs as the user who +// created the job/pipeline. // -// Only `user_name` or `service_principal_name` can be specified. If both are -// specified, an error is thrown. +// Exactly one of `user_name`, `service_principal_name`, `group_name` should be +// specified. If not, an error is thrown. type JobRunAs struct { // Application ID of an active service principal. Setting this field // requires the `servicePrincipal/user` role. @@ -861,7 +865,11 @@ type JobSettings struct { // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be - // referenced by tasks of this job. + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. Environments []JobEnvironment `tfsdk:"environment" tf:"optional"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. 
When using the Jobs API 2.1 this value is @@ -907,12 +915,12 @@ type JobSettings struct { Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. Queue *QueueSettings `tfsdk:"queue" tf:"optional"` - // Write-only setting, available only in Create/Update/Reset and Submit - // calls. Specifies the user or service principal that the job runs as. If - // not specified, the job runs as the user who created the job. + // Write-only setting. Specifies the user, service principal or group that + // the job/pipeline runs as. If not specified, the job/pipeline runs as the + // user who created the job/pipeline. // - // Only `user_name` or `service_principal_name` can be specified. If both - // are specified, an error is thrown. + // Exactly one of `user_name`, `service_principal_name`, `group_name` should + // be specified. If not, an error is thrown. RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index 1caafa7419..b6abbbb71c 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -19,6 +19,8 @@ type CreatePipeline struct { // If false, deployment will fail if name conflicts with that of another // pipeline. AllowDuplicateNames types.Bool `tfsdk:"allow_duplicate_names" tf:"optional"` + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -103,6 +105,8 @@ type EditPipeline struct { // If false, deployment will fail if name has changed and conflicts the name // of another pipeline. AllowDuplicateNames types.Bool `tfsdk:"allow_duplicate_names" tf:"optional"` + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -209,6 +213,8 @@ type GetPipelineResponse struct { ClusterId types.String `tfsdk:"cluster_id" tf:"optional"` // The username of the pipeline creator. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` + // Serverless budget policy ID of this pipeline. + EffectiveBudgetPolicyId types.String `tfsdk:"effective_budget_policy_id" tf:"optional"` // The health of a pipeline. Health types.String `tfsdk:"health" tf:"optional"` // The last time the pipeline settings were modified or created. @@ -642,6 +648,8 @@ type PipelinePermissionsRequest struct { } type PipelineSpec struct { + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. 
If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, diff --git a/internal/service/serving_tf/model.go b/internal/service/serving_tf/model.go index c40d18ee63..b22dc911a7 100755 --- a/internal/service/serving_tf/model.go +++ b/internal/service/serving_tf/model.go @@ -30,6 +30,85 @@ type Ai21LabsConfig struct { Ai21labsApiKeyPlaintext types.String `tfsdk:"ai21labs_api_key_plaintext" tf:"optional"` } +type AiGatewayConfig struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality. + InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` +} + +type AiGatewayGuardrailParameters struct { + // List of invalid keywords. AI guardrail uses keyword or string matching to + // decide if the keyword exists in the request or response content. + InvalidKeywords []types.String `tfsdk:"invalid_keywords" tf:"optional"` + // Configuration for guardrail PII filter. + Pii *AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional"` + // Indicates whether the safety filter is enabled. + Safety types.Bool `tfsdk:"safety" tf:"optional"` + // The list of allowed topics. Given a chat request, this guardrail flags + // the request if its topic is not in the allowed topics. + ValidTopics []types.String `tfsdk:"valid_topics" tf:"optional"` +} + +type AiGatewayGuardrailPiiBehavior struct { + // Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' + // is set for the input guardrail and the request contains PII, the request + // is not sent to the model server and 400 status code is returned; if + // 'BLOCK' is set for the output guardrail and the model response contains + // PII, the PII info in the response is redacted and 400 status code is + // returned. + Behavior types.String `tfsdk:"behavior" tf:""` +} + +type AiGatewayGuardrails struct { + // Configuration for input guardrail filters. + Input *AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional"` + // Configuration for output guardrail filters. + Output *AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional"` +} + +type AiGatewayInferenceTableConfig struct { + // The name of the catalog in Unity Catalog. Required when enabling + // inference tables. NOTE: On update, you have to disable inference table + // first in order to change the catalog name. + CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` + // Indicates whether the inference table is enabled. + Enabled types.Bool `tfsdk:"enabled" tf:"optional"` + // The name of the schema in Unity Catalog. Required when enabling inference + // tables. NOTE: On update, you have to disable inference table first in + // order to change the schema name. 
+ SchemaName types.String `tfsdk:"schema_name" tf:"optional"` + // The prefix of the table in Unity Catalog. NOTE: On update, you have to + // disable inference table first in order to change the prefix name. + TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"` +} + +type AiGatewayRateLimit struct { + // Used to specify how many calls are allowed for a key within the + // renewal_period. + Calls types.Int64 `tfsdk:"calls" tf:""` + // Key field for a rate limit. Currently, only 'user' and 'endpoint' are + // supported, with 'endpoint' being the default if not specified. + Key types.String `tfsdk:"key" tf:"optional"` + // Renewal period field for a rate limit. Currently, only 'minute' is + // supported. + RenewalPeriod types.String `tfsdk:"renewal_period" tf:""` +} + +type AiGatewayUsageTrackingConfig struct { + // Whether to enable usage tracking. + Enabled types.Bool `tfsdk:"enabled" tf:"optional"` +} + type AmazonBedrockConfig struct { // The Databricks secret key reference for an AWS access key ID with // permissions to interact with Bedrock services. If you prefer to paste @@ -147,14 +226,17 @@ type CohereConfig struct { } type CreateServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: only + // external model endpoints are supported as of now. + AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The core config of the serving endpoint. Config EndpointCoreConfigInput `tfsdk:"config" tf:""` // The name of the serving endpoint. This field is required and must be // unique across a Databricks workspace. An endpoint name can consist of // alphanumeric characters, dashes, and underscores. Name types.String `tfsdk:"name" tf:""` - // Rate limits to be applied to the serving endpoint. NOTE: only external - // and foundation model endpoints are supported as of now. + // Rate limits to be applied to the serving endpoint. NOTE: this field is + // deprecated, please use AI Gateway to manage rate limits. RateLimits []RateLimit `tfsdk:"rate_limits" tf:"optional"` // Enable route optimization for the serving endpoint. RouteOptimized types.Bool `tfsdk:"route_optimized" tf:"optional"` @@ -520,6 +602,42 @@ type PayloadTable struct { StatusMessage types.String `tfsdk:"status_message" tf:"optional"` } +// Update AI Gateway of a serving endpoint +type PutAiGatewayRequest struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality. + InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + // The name of the serving endpoint whose AI Gateway is being updated. This + // field is required. + Name types.String `tfsdk:"-"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` +} + +type PutAiGatewayResponse struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. 
+ Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality . + InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` +} + // Update rate limits of a serving endpoint type PutRequest struct { // The name of the serving endpoint whose rate limits are being updated. @@ -914,6 +1032,9 @@ type ServerLogsResponse struct { } type ServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model endpoints are currently supported. + AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigSummary `tfsdk:"config" tf:"optional"` // The timestamp when the endpoint was created in Unix time. @@ -960,6 +1081,9 @@ type ServingEndpointAccessControlResponse struct { } type ServingEndpointDetailed struct { + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model endpoints are currently supported. + AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigOutput `tfsdk:"config" tf:"optional"` // The timestamp when the endpoint was created in Unix time. diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index 55059248b5..117cf8d113 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -32,6 +32,10 @@ type AutomaticClusterUpdateSetting struct { SettingName types.String `tfsdk:"setting_name" tf:"optional"` } +type BooleanMessage struct { + Value types.Bool `tfsdk:"value" tf:"optional"` +} + type ClusterAutoRestartMessage struct { CanToggle types.Bool `tfsdk:"can_toggle" tf:"optional"` @@ -292,6 +296,54 @@ type DeleteDefaultNamespaceSettingResponse struct { Etag types.String `tfsdk:"etag" tf:""` } +// Delete Legacy Access Disablement Status +type DeleteDisableLegacyAccessRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +// The etag is returned. +type DeleteDisableLegacyAccessResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + +// Delete the disable legacy features setting +type DeleteDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +// The etag is returned. +type DeleteDisableLegacyFeaturesResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + // Delete access list type DeleteIpAccessListRequest struct { // The ID for the corresponding IP access list @@ -377,6 +429,42 @@ type DeleteTokenManagementRequest struct { TokenId types.String `tfsdk:"-"` } +type DisableLegacyAccess struct { + DisableLegacyAccess BooleanMessage `tfsdk:"disable_legacy_access" tf:""` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +type DisableLegacyFeatures struct { + DisableLegacyFeatures BooleanMessage `tfsdk:"disable_legacy_features" tf:""` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. 
That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + type EmailConfig struct { // Email addresses to notify. Addresses []types.String `tfsdk:"addresses" tf:"optional"` @@ -538,6 +626,30 @@ type GetDefaultNamespaceSettingRequest struct { Etag types.String `tfsdk:"-"` } +// Retrieve Legacy Access Disablement Status +type GetDisableLegacyAccessRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +// Get the disable legacy features setting +type GetDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + // Get the enhanced security monitoring setting type GetEnhancedSecurityMonitoringSettingRequest struct { // etag used for versioning. The response is at least as fresh as the eTag @@ -1045,6 +1157,8 @@ type TokenInfo struct { OwnerId types.Int64 `tfsdk:"owner_id" tf:"optional"` // ID of the token. TokenId types.String `tfsdk:"token_id" tf:"optional"` + // If applicable, the ID of the workspace that the token was created in. + WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:"optional"` } type TokenPermission struct { @@ -1137,6 +1251,34 @@ type UpdateDefaultNamespaceSettingRequest struct { Setting DefaultNamespaceSetting `tfsdk:"setting" tf:""` } +// Details required to update a setting. +type UpdateDisableLegacyAccessRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting DisableLegacyAccess `tfsdk:"setting" tf:""` +} + +// Details required to update a setting. +type UpdateDisableLegacyFeaturesRequest struct { + // This should always be set to true for Settings API. 
Added for AIP + // compliance. + AllowMissing types.Bool `tfsdk:"allow_missing" tf:""` + // Field mask is required to be passed into the PATCH request. Field mask + // specifies which fields of the setting payload will be updated. The field + // mask needs to be supplied as single string. To specify multiple fields in + // the field mask, use comma as the separator (no space). + FieldMask types.String `tfsdk:"field_mask" tf:""` + + Setting DisableLegacyFeatures `tfsdk:"setting" tf:""` +} + // Details required to update a setting. type UpdateEnhancedSecurityMonitoringSettingRequest struct { // This should always be set to true for Settings API. Added for AIP diff --git a/internal/service/sql_tf/model.go b/internal/service/sql_tf/model.go index 8bbdb536f4..cbee45561b 100755 --- a/internal/service/sql_tf/model.go +++ b/internal/service/sql_tf/model.go @@ -208,15 +208,6 @@ type ChannelInfo struct { Name types.String `tfsdk:"name" tf:"optional"` } -// Client code that triggered the request -type ClientCallContext struct { - // File name that contains the last line that triggered the request. - FileName *EncodedText `tfsdk:"file_name" tf:"optional"` - // Last line number within a file or notebook cell that triggered the - // request. - LineNumber types.Int64 `tfsdk:"line_number" tf:"optional"` -} - type ColumnInfo struct { // The name of the column. Name types.String `tfsdk:"name" tf:"optional"` @@ -710,13 +701,6 @@ type EditWarehouseResponse struct { type Empty struct { } -type EncodedText struct { - // Carry text data in different form. - Encoding types.String `tfsdk:"encoding" tf:"optional"` - // text data - Text types.String `tfsdk:"text" tf:"optional"` -} - type EndpointConfPair struct { Key types.String `tfsdk:"key" tf:"optional"` @@ -1673,8 +1657,6 @@ type QueryInfo struct { QueryEndTimeMs types.Int64 `tfsdk:"query_end_time_ms" tf:"optional"` // The query ID. QueryId types.String `tfsdk:"query_id" tf:"optional"` - - QuerySource *QuerySource `tfsdk:"query_source" tf:"optional"` // The time the query started. QueryStartTimeMs types.Int64 `tfsdk:"query_start_time_ms" tf:"optional"` // The text of the query. @@ -1834,62 +1816,6 @@ type QueryPostContent struct { Tags []types.String `tfsdk:"tags" tf:"optional"` } -type QuerySource struct { - // UUID - AlertId types.String `tfsdk:"alert_id" tf:"optional"` - // Client code that triggered the request - ClientCallContext *ClientCallContext `tfsdk:"client_call_context" tf:"optional"` - // Id associated with a notebook cell - CommandId types.String `tfsdk:"command_id" tf:"optional"` - // Id associated with a notebook run or execution - CommandRunId types.String `tfsdk:"command_run_id" tf:"optional"` - // UUID - DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` - // UUID for Lakeview Dashboards, separate from DBSQL Dashboards - // (dashboard_id) - DashboardV3Id types.String `tfsdk:"dashboard_v3_id" tf:"optional"` - - DriverInfo *QuerySourceDriverInfo `tfsdk:"driver_info" tf:"optional"` - // Spark service that received and processed the query - EntryPoint types.String `tfsdk:"entry_point" tf:"optional"` - // UUID for Genie space - GenieSpaceId types.String `tfsdk:"genie_space_id" tf:"optional"` - - IsCloudFetch types.Bool `tfsdk:"is_cloud_fetch" tf:"optional"` - - IsDatabricksSqlExecApi types.Bool `tfsdk:"is_databricks_sql_exec_api" tf:"optional"` - - JobId types.String `tfsdk:"job_id" tf:"optional"` - // With background compute, jobs can be managed by different internal teams. 
- // When not specified, not a background compute job When specified and the - // value is not JOBS, it is a background compute job - JobManagedBy types.String `tfsdk:"job_managed_by" tf:"optional"` - - NotebookId types.String `tfsdk:"notebook_id" tf:"optional"` - // String provided by a customer that'll help them identify the query - QueryTags types.String `tfsdk:"query_tags" tf:"optional"` - // Id associated with a job run or execution - RunId types.String `tfsdk:"run_id" tf:"optional"` - // Id associated with a notebook cell run or execution - RunnableCommandId types.String `tfsdk:"runnable_command_id" tf:"optional"` - - ScheduledBy types.String `tfsdk:"scheduled_by" tf:"optional"` - - ServerlessChannelInfo *ServerlessChannelInfo `tfsdk:"serverless_channel_info" tf:"optional"` - // UUID - SourceQueryId types.String `tfsdk:"source_query_id" tf:"optional"` -} - -type QuerySourceDriverInfo struct { - BiToolEntry types.String `tfsdk:"bi_tool_entry" tf:"optional"` - - DriverName types.String `tfsdk:"driver_name" tf:"optional"` - - SimbaBrandingVendor types.String `tfsdk:"simba_branding_vendor" tf:"optional"` - - VersionNumber types.String `tfsdk:"version_number" tf:"optional"` -} - type RepeatedEndpointConfPairs struct { // Deprecated: Use configuration_pairs ConfigPair []EndpointConfPair `tfsdk:"config_pair" tf:"optional"` @@ -1964,11 +1890,6 @@ type ResultSchema struct { Columns []ColumnInfo `tfsdk:"columns" tf:"optional"` } -type ServerlessChannelInfo struct { - // Name of the Channel - Name types.String `tfsdk:"name" tf:"optional"` -} - type ServiceError struct { ErrorCode types.String `tfsdk:"error_code" tf:"optional"` // A brief summary of the error condition.
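
A few illustrative sketches of the new model surface follow. They are assumptions, not part of this diff: they only use the structs shown above plus the terraform-plugin-framework value helpers the generated code already imports, and they pretend to live inside the provider module (the internal/service packages are not importable elsewhere); helper names and placeholder IDs are hypothetical.

The first sketch shows how code that read the removed App.Status field might consume the new app_status / compute_status split in apps_tf. The summarizeApp helper is hypothetical.

package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/apps_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// summarizeApp reads the two status blocks that replace the former single
// Status field on apps_tf.App.
func summarizeApp(app apps_tf.App) string {
	appState, computeState := "UNKNOWN", "UNKNOWN"
	if app.AppStatus != nil {
		appState = app.AppStatus.State.ValueString()
	}
	if app.ComputeStatus != nil {
		computeState = app.ComputeStatus.State.ValueString()
	}
	return fmt.Sprintf("app=%s compute=%s", appState, computeState)
}

func main() {
	app := apps_tf.App{
		AppStatus: &apps_tf.ApplicationStatus{
			State:   types.StringValue("RUNNING"),
			Message: types.StringValue("Application is running."),
		},
		ComputeStatus: &apps_tf.ComputeStatus{
			State: types.StringValue("ACTIVE"),
		},
	}
	fmt.Println(summarizeApp(app))
}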
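
A minimal sketch of the new temporary table credential models in catalog_tf: build a GenerateTemporaryTableCredentialRequest and branch on whichever cloud-specific credential block a response carries. The response literal is a stand-in; real values would come from the Unity Catalog API.

package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/catalog_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// describeCredential inspects the per-cloud credential pointers; only one is
// expected to be set on a given response.
func describeCredential(resp catalog_tf.GenerateTemporaryTableCredentialResponse) string {
	switch {
	case resp.AwsTempCredentials != nil:
		return "aws access key " + resp.AwsTempCredentials.AccessKeyId.ValueString()
	case resp.AzureUserDelegationSas != nil:
		return "azure SAS token"
	case resp.GcpOauthToken != nil:
		return "gcp oauth token"
	case resp.R2TempCredentials != nil:
		return "r2 access key " + resp.R2TempCredentials.AccessKeyId.ValueString()
	default:
		return "no credential returned"
	}
}

func main() {
	req := catalog_tf.GenerateTemporaryTableCredentialRequest{
		Operation: types.StringValue("READ"),                                 // or READ_WRITE for write access
		TableId:   types.StringValue("00000000-0000-0000-0000-000000000000"), // placeholder table UUID
	}
	fmt.Printf("request: %+v\n", req)

	// Stand-in response; expiration_time is unix epoch milliseconds.
	resp := catalog_tf.GenerateTemporaryTableCredentialResponse{
		AwsTempCredentials: &catalog_tf.AwsCredentials{AccessKeyId: types.StringValue("ASIA...")},
		ExpirationTime:     types.Int64Value(1735689600000),
	}
	fmt.Println(describeCredential(resp))
}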
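
A sketch of the clarified runtime_engine guidance in compute_tf: drop the legacy "-photon-" marker from spark_version and request Photon explicitly. It assumes the existing spark_version field on ClusterSpec; the version strings are placeholders.

package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/compute_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	// Legacy style: Photon inferred from the spark_version string.
	legacy := compute_tf.ClusterSpec{
		SparkVersion: types.StringValue("13.3.x-photon-scala2.12"),
	}

	// Preferred style per the updated field docs: plain spark_version plus
	// runtime_engine set to PHOTON.
	preferred := compute_tf.ClusterSpec{
		SparkVersion:  types.StringValue("13.3.x-scala2.12"),
		RuntimeEngine: types.StringValue("PHOTON"),
	}

	fmt.Println(legacy.SparkVersion.ValueString(), "->",
		preferred.SparkVersion.ValueString(), preferred.RuntimeEngine.ValueString())
}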
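
A sketch of the updated run_as contract in jobs_tf: exactly one of user_name, service_principal_name, or group_name must be set. The validateRunAs helper is hypothetical and only checks the two fields visible in the existing struct; group_name is mentioned in the new docs but not shown in this diff.

package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/jobs_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// validateRunAs mirrors the documented "exactly one" rule for the identities
// a job or pipeline can run as.
func validateRunAs(r jobs_tf.JobRunAs) error {
	set := 0
	for _, v := range []types.String{r.UserName, r.ServicePrincipalName} {
		if !v.IsNull() && v.ValueString() != "" {
			set++
		}
	}
	if set != 1 {
		return fmt.Errorf("exactly one run_as identity must be set, got %d", set)
	}
	return nil
}

func main() {
	runAs := jobs_tf.JobRunAs{
		// Application ID of an active service principal (placeholder UUID).
		ServicePrincipalName: types.StringValue("11111111-2222-3333-4444-555555555555"),
	}
	fmt.Println(validateRunAs(runAs)) // <nil>
}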
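
A short sketch of the new pipeline budget policy fields in pipelines_tf: budget_policy_id is set on create/edit, while effective_budget_policy_id is the read-only value the backend resolved. The policy ID is a placeholder.

package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/pipelines_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	create := pipelines_tf.CreatePipeline{
		// Other pipeline settings (name, clusters, libraries, ...) omitted for brevity.
		BudgetPolicyId: types.StringValue("policy-1234"), // placeholder serverless budget policy ID
	}

	// Stand-in response: the server reports which policy actually applies.
	got := pipelines_tf.GetPipelineResponse{
		EffectiveBudgetPolicyId: types.StringValue("policy-1234"),
	}
	fmt.Println(create.BudgetPolicyId.ValueString(), "->", got.EffectiveBudgetPolicyId.ValueString())
}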
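
A sketch of attaching the new AI Gateway configuration to a serving endpoint via serving_tf.CreateServingEndpoint. Per the updated comments, endpoint-level rate_limits are deprecated in favour of ai_gateway rate limits; catalog and schema names are placeholders, and the required Config block is omitted for brevity.

package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/serving_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	endpoint := serving_tf.CreateServingEndpoint{
		Name: types.StringValue("my-external-model"),
		// Config is required (tf:"") on real requests; omitted here for brevity.
		AiGateway: &serving_tf.AiGatewayConfig{
			Guardrails: &serving_tf.AiGatewayGuardrails{
				Input: &serving_tf.AiGatewayGuardrailParameters{
					// Only 'BLOCK' is currently supported for the PII filter.
					Pii:    &serving_tf.AiGatewayGuardrailPiiBehavior{Behavior: types.StringValue("BLOCK")},
					Safety: types.BoolValue(true),
				},
			},
			InferenceTableConfig: &serving_tf.AiGatewayInferenceTableConfig{
				Enabled:     types.BoolValue(true),
				CatalogName: types.StringValue("main"),     // placeholder catalog
				SchemaName:  types.StringValue("payloads"), // placeholder schema
			},
			RateLimits: []serving_tf.AiGatewayRateLimit{{
				Calls:         types.Int64Value(100),
				Key:           types.StringValue("endpoint"), // 'user' or 'endpoint'
				RenewalPeriod: types.StringValue("minute"),   // only 'minute' is supported
			}},
			UsageTrackingConfig: &serving_tf.AiGatewayUsageTrackingConfig{
				Enabled: types.BoolValue(true),
			},
		},
	}
	fmt.Printf("%+v\n", endpoint.AiGateway.RateLimits[0])
}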
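
Finally, a sketch of the read -> update etag pattern described on the new disable-legacy-access setting in settings_tf. The etag would come from a prior GET, and the field mask path is an assumed example, not taken from this diff.

package main

import (
	"fmt"

	"github.com/databricks/terraform-provider-databricks/internal/service/settings_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

func main() {
	previousEtag := "etag-from-get-response" // placeholder; obtained from a prior read

	update := settings_tf.UpdateDisableLegacyAccessRequest{
		// Always true for the Settings API, per the field docs.
		AllowMissing: types.BoolValue(true),
		// Comma-separated (no spaces) mask of the fields being patched; path is illustrative.
		FieldMask: types.StringValue("disable_legacy_access.value"),
		Setting: settings_tf.DisableLegacyAccess{
			DisableLegacyAccess: settings_tf.BooleanMessage{Value: types.BoolValue(true)},
			Etag:                types.StringValue(previousEtag),
			// 'default' when the setting has a single per-workspace instance.
			SettingName: types.StringValue("default"),
		},
	}
	fmt.Printf("%+v\n", update)
}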